query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (sequence of 30) | negative_scores (sequence of 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
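Each row pairs a natural-language query with one positive code document, plus 30 mined negative documents and their similarity scores; document_score and document_rank record how the positive document itself scored and ranked against those negatives. The sketch below shows one way such rows could be unpacked into (query, positive, negative) training triplets — a minimal illustration only: the JSON-lines layout and the file name "rows.jsonl" are assumptions, not part of this dataset card.

```python
# Minimal sketch, assuming the rows are exported as JSON lines with the
# columns shown above ("rows.jsonl" is a placeholder file name).
import json

def iter_triplets(path="rows.jsonl"):
    """Yield (query, positive_document, negative_document) triplets."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            row = json.loads(line)
            query, positive = row["query"], row["document"]
            # Pair each negative with its score and surface the hardest
            # (highest-scoring) negatives first.
            ranked = sorted(
                zip(row["negatives"], map(float, row["negative_scores"])),
                key=lambda pair: pair[1],
                reverse=True,
            )
            for negative, _score in ranked:
                yield query, positive, negative
```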
Test if user's favourite restaurant is added to DB | def test_add_to_fav_(self):
result = self.client.post("/add_to_fav", data={"yelp_biz_id":"JA_V9TqDCrkgknqrcUndIQ",
"yelp_rest_name":"Siam", "yelp_rating":"4",
"yelp_category":"Thai", "yelp_price":"$$",
"yelp_image_url":"https://s3-media2.fl.yelpcdn.com/bphoto/1SkZwZrRZkQSzRMn_Trs3w/o.jpg" })
DB_result = Restaurant_details.query.filter_by(biz_id = "JA_V9TqDCrkgknqrcUndIQ").first()
        self.assertIsNotNone(DB_result) # testing that the returned result is not None
        self.assertEqual(DB_result.restaurant_name, 'Siam') # testing that the restaurant name is what it should be
self.assertIn(b"Your Favourite has been saved", result.data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_Favourite(self):\n self.assertEquals(self.fav_1.pk, 1)\n self.assertEquals(self.fav_1.date_added, '2019-12-20 09:00:00')\n self.assertEquals(self.fav_1.user.pk, 1)\n self.assertEquals(self.fav_1.product.pk, 1)",
"async def create(self, favorite: Favorite) -> Favorite:",
"def test_get(self, init_db, favorite):\n assert Favorite.get(favorite.id) == favorite",
"def favorite(user, wine):\n\n favorite = Favorite(user=user, wine=wine)\n\n db.session.add(favorite)\n db.session.commit()\n\n # return favorite",
"def update_favorites():\n\n check_favorite = Favorite.query.filter(Favorite.favorited_item==session[\"athlete_id\"]).first()\n route = f'/athletes/{session[\"athlete_id\"]}'\n\n if check_favorite is None:\n new_update = Favorite(id=current_user.id, favorited_item=session[\"athlete_id\"])\n db.session.add(new_update) \n \n else:\n db.session.delete(check_favorite)\n \n db.session.commit()\n \n return redirect(route)",
"def test_user_model(self):\n\n u = User(\n email=\"testy@test.com\",\n username=\"testuser\",\n password=\"HASHED_PASSWORD\",\n phone_number=None,\n image_url=None,\n )\n\n db.session.add(u)\n db.session.commit()\n\n self.assertEqual(len(u.favorites), 0)",
"def insert_in_favourite(self, food_id, substitute_id):\n\n ref = (food_id, substitute_id)\n print(\"\"\"\\n Souhaitez-vous ajouter cette recherche dans vos favoris ?\n 1. Oui\n 0. Non \"\"\")\n\n choice = int(input(\"Entrez votre choix: \\n\"))\n if choice == 1:\n self.cursor.execute(\"\"\"INSERT INTO favourite\n (food_id, substitute_id)\n VALUES (%s, %s)\"\"\", ref)\n else:\n return",
"def add_favourite(recipe_id):\r\n if \"user\" in session:\r\n user = coll_users.find_one(\r\n {\"username_lower\": session[\"user\"]})[\"_id\"]\r\n coll_users.update_one(\r\n {\"_id\": ObjectId(user)},\r\n {\"$push\": {\"user_favs\": ObjectId(recipe_id)}})\r\n coll_recipes.update(\r\n {\"_id\": ObjectId(recipe_id)}, {\"$inc\": {\"favourites\": 1}})\r\n return redirect(url_for(\r\n \"recipes.recipe_detail\",\r\n recipe_id=recipe_id))\r\n else:\r\n flash(\"You must be logged in to perform that action!\")\r\n return redirect(url_for(\"users.login\"))",
"def favourite():\n\n # user is adding or deleting a favourite\n if request.method == \"POST\":\n\n # user is adding a station from 'stations.html'\n if request.form.get(\"add\"):\n\n # max limit of 5 favourites per user\n if len(Favourite.query.filter(Favourite.user_id==session[\"user_id\"]).all()) > 4:\n\n return redirect(url_for(\"stations\", error=\"limit\"))\n\n # remember id of station to add\n station_id = request.form.get(\"add\")\n\n # check user hasn't already favourited station\n if(Favourite.query.filter(Favourite.user_id==session[\"user_id\"],Favourite.station_id==station_id).first()):\n\n return redirect(url_for(\"stations\", error=\"taken\"))\n\n # add favourite to db for user\n addFav = Favourite(user_id=session[\"user_id\"],station_id=station_id)\n db.session.add(addFav)\n db.session.commit()\n\n return redirect(url_for(\"stations\", success=True))\n\n # user is deleting a station from 'favourites.html'\n elif request.form.get(\"delete\"):\n\n station_id = request.form.get(\"delete\")\n\n delFav = Favourite.query.filter(Favourite.user_id==session[\"user_id\"],Favourite.station_id==station_id).first()\n db.session.delete(delFav)\n db.session.commit()\n\n return redirect(url_for(\"favourite\", deleted=True))\n\n # user is viewing favourites via GET\n else:\n favourites = Favourite.query.filter(Favourite.user_id==session[\"user_id\"]).all()\n\n return render_template(\"favourites.html\", username=get_username(), favourites=favourites)",
"def add_favorite(request):\n print(\"La fonction pour ajouté un produit est appelé\")\n query = request.GET.get('_substitute_product','')\n print(query)\n # query_favorite = query.id\n query_name = Product.objects.get(name=query)\n print(query_name)\n print(\"ID DU PRODUIT\")\n username = request.user\n user_id = request.user.id\n # user = User.objects.get(id=username)\n print(username)\n print(\"ID DE L'USER\")\n if query_name is not None:\n try: \n UserFavorite.objects.get(user_name=username, product=query_name)\n print(\"Ce produit est déjà dans vos favoris.\")\n except ObjectDoesNotExist:\n new_favorite = UserFavorite.objects.create(user_name=username,product=query_name)\n new_favorite.save()\n print(\"Le produit a bien été enregistré.\")\n else:\n pass\n return redirect('favorits')\n # return render(request,'index.html')",
"def add_visit():\n\n # checks to see if user is logged in\n\n if session.get('username'):\n username = session['username']\n user = User.query.filter_by(username=username).first()\n\n # finds the friend searched for on the database\n friend = request.args.get(\"friend\")\n friend_user = User.query.filter_by(username=friend).first()\n\n when = request.args.get(\"when\")\n user_rating = Decimal(request.args.get(\"rating\"))\n\n # finds the restaurant's ID, adds the restaurant to the database if not in yet\n restaurant = request.args.get(\"name\")\n yelp_id = request.args.get(\"id\")\n avg_rating = request.args.get(\"avg_rating\")\n price_lvl = request.args.get(\"price\")\n review_count = request.args.get(\"rc\")\n categs = request.args.get(\"categs\")\n list_categs = categs.split(\",\")\n\n if not Restaurant.query.filter_by(name=restaurant).all():\n new_restaurant = Restaurant(yelp_id=yelp_id,\n name=restaurant,\n rating=avg_rating,\n price=turn_to_nums(price_lvl),\n review_count=review_count)\n db.session.add(new_restaurant)\n db.session.commit()\n\n rest_id = db.session.query(Restaurant.id).filter_by(yelp_id=yelp_id).first()[0]\n if not Category.query.filter_by(rest_id=rest_id).all():\n if len(list_categs) == 3:\n categ1, categ2, categ3 = list_categs\n elif len(list_categs) == 2:\n categ1, categ2 = list_categs\n categ3 = None\n else:\n categ1 = list_categs\n categ2 = None\n categ3 = None\n new_categs = Category(rest_id=rest_id,\n categ1=categ1,\n categ2=categ2,\n categ3=categ3)\n db.session.add(new_categs)\n db.session.commit()\n\n # Adding to the visits and uservisits tables\n new_visit = Visit(rest_id=rest_id, date=when)\n db.session.add(new_visit)\n db.session.commit()\n new_visit_id = db.session.query(Visit.id).filter_by(rest_id=rest_id,\n date=when).order_by(Visit.date.desc()).first()[0]\n new_visit_exp = UserExp(visit_id=new_visit_id,\n user_id=user.id,\n rating=user_rating)\n f_new_visit_exp = UserExp(visit_id=new_visit_id,\n user_id=friend_user.id)\n db.session.add(new_visit_exp)\n db.session.add(f_new_visit_exp)\n db.session.commit()\n return \" <span class='label label-success'>Saved!</span>\"\n\n # if not logged in, cannot save\n else:\n return \" <a href='/login'><span class='label label-default'>Login to save</span></a>\"",
"def save_to_favorites_list():\n\n #get show id from the event handler/post request\n show_id = str(request.form.get(\"id\"))\n #get button content from the event handler/post request\n button_content = request.form.get(\"button_content\")\n\n button_content_encoded = button_content.encode('utf-8')\n\n #save utf-8 encoded checkmark as a string variable\n check_mark = \"\\xe2\\x9c\\x93\"\n\n #find the current logged in user\n email = session.get(\"current_user\")\n\n if email:\n\n #use email to find the user_id\n user_id = User.find_user_id_with_email(email)\n\n #if the show has not been favorited yet\n if check_mark not in button_content_encoded:\n #add row in favorites table\n favorite = Favorite.add_to_favorites(show_id, user_id)\n\n #pass back the show_id and that the show has been favorited\n payload = {\"show_id\":show_id,\"favorite\":\"True\"}\n return jsonify(payload)\n else:\n #delete row in favorites table\n Favorite.delete_favorite(show_id)\n\n #pass back the show_id and that the show has been unfavorited\n payload = {\"show_id\":show_id,\"favorite\":\"False\"}\n return jsonify(payload)\n else:\n flash(\"You need to be logged in to see that page.\")\n return redirect(\"/login\")",
"def test_save(self, init_db, category1):\n params = {\n 'title' : fake.alphanumeric(15),\n 'description' : fake.alphanumeric(200),\n 'ranking' : 1,\n 'meta_data' : {\n 'color' : 'red',\n 'quantity' : 2,\n 'date_purchased' : '2019-02-05',\n 'condition' : 'bad'\n },\n 'category_id' : category1.id\n }\n\n favorite = Favorite(**params)\n assert favorite == favorite.save()",
"def insert_favorite_food(self, user_answer_id_food, name_substitute):\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n save_favorite_food = \"\"\"INSERT INTO Favorite\n (id_food, id_substitute_chooses)\n VALUES({0}, \n (SELECT id FROM Food WHERE name_food = {1}))\"\"\" \\\n .format(int(user_answer_id_food),\n \"\\'\" + name_substitute + \"\\'\")\n self.cursor.execute(save_favorite_food)\n self.data_base.commit()",
"def favorite():\n # handle pre-flight for browsers CORS access\n if request.method == \"OPTIONS\":\n return generate_response()\n # part1: verify the token\n checked_and_verified, response = check_verify_token(request,login_session)\n if checked_and_verified == False: return response\n # handle the edge case where user is authorized to perform create user but not other method\n if not is_loggedin(login_session):\n response = generate_message(MESSAGE_USER_NOT_LOGGED_IN,401)\n return response\n # handles the get request\n if request.method == \"GET\":\n favorites = read_criteria(Favorite,{\"user_id\":login_session[\"user_id\"]},session,\"m\") or []\n favorites_room_json = [room_json(favorite.room, session,app.config[\"OFFLINE_TESTING\"], login_session) for favorite in favorites]\n return generate_response(elem={\"favorites\":favorites_room_json})\n # part2: check json, handle POST request\n checked_json, response, requested_json = check_json_form(request,MESSAGE_BAD_JSON,MESSAGE_GET_FAV_NO_JSON)\n if checked_json != True: return response\n # verify room id type, with strict mode\n requested_json[\"user_id\"] = login_session[\"user_id\"]\n correct_format,valid_update_pairs, response = process_request_json(Favorite,requested_json, True, access_mode=\"read\",nondb_type_map={\"action\":str})\n if correct_format == False: \n return response\n room = get_row_if_exists(Room, session, ** {\"id\": requested_json[\"room_id\"]})\n user = get_row_if_exists(User, session, ** {\"id\": login_session[\"user_id\"]})\n # if the room id in the request doesn't fit any entry in db, return error message\n if room is None:\n response = generate_message(MESSAGE_FAV_ROOM_NOT_EXIST,404)\n return response\n if requested_json[\"action\"] == \"add\":\n # the add favorite already handle duplicates add\n # it treats multiple adds as one add and every duplicate add afterwards is counted as success\n add_favorite(room,user, session)\n response = generate_message(MESSAGE_POST_FAV_ADD_SUCCESS,201)\n return response\n elif requested_json[\"action\"] == \"delete\":\n # the delete favorite already handle duplicates delete\n # it treats multiple delete as one delete and every duplicate delete afterwards is counted as success\n remove_entry(Favorite,requested_json[\"room_id\"], session)\n response = generate_message(MESSAGE_POST_FAV_DEL_SUCCESS,200)\n return response\n else: # method not supported\n response = generate_message(MESSAGE_POST_FAV_METHOD_NOT_SUPPORTED,405)\n return response",
"def test_display_favorite(self):\n\n result = self.client.get(\"/view_favorites\")\n self.assertIn(b\"s1925148\", result.data)",
"def add_to_fav(show_id, name):\n db = get_db()\n db.execute(\n 'INSERT INTO shows_users (show_id, user_id)'\n ' VALUES (?, ?)',\n (show_id, session['user_id'])\n )\n\n flash('\\\"%s\\\" has been successfully added to your favourite TV Shows!' % name)\n db.commit()\n return redirect(request.referrer)",
"def favourite_create(self, data, sesh):\n\n\t\t# Verify fields\n\t\ttry: DictHelper.eval(data, ['id'])\n\t\texcept ValueError as e: return Services.Effect(error=(1001, [(f, \"missing\") for f in e.args]))\n\n\t\t# If someone tries to add themselves\n\t\tif data['id'] == sesh['thrower']['_id']:\n\t\t\treturn Services.Effect(False);\n\n\t\t# Make sure the thrower exists\n\t\tif not Thrower.exists(data['id']):\n\t\t\treturn Services.Effect(error=(1104, data['id']))\n\n\t\t# Add the thrower to the logged in thrower's favourites and return the\n\t\t#\tresult\n\t\treturn Services.Effect(\n\t\t\tFavourites.add(sesh['thrower']['_id'], data['id'])\n\t\t)",
"def testing_favourites(self, email, password, song):\n add_to_favourites = Favourites(self.driver, email, password, song)\n add_to_favourites.open_login_page()\n add_to_favourites.set_user_inputs()\n add_to_favourites.open_home_page()\n add_to_favourites.selecting_a_random_song()\n assert add_to_favourites.check_the_song_is_displayed() is True, 'Song is not displaying'\n add_to_favourites.add_to_wishlist()\n add_to_favourites.go_to_favourites_page()\n assert add_to_favourites.check_the_song_is_added_to_wishlist() == song, 'Not the song'",
"def add_testing_site():\n\n test_id = request.form.get('test_id')\n favorite = { \n 'status': None,\n }\n try:\n if 'user_id' in session:\n user_id = session['user_id']\n\n already_favorited = check_testing_saved_location_in_favorites(user_id, test_id)\n\n if already_favorited:\n favorite['status'] = 'already_favorited'\n flash('Already saved to favorites.')\n return jsonify(favorite)\n\n else:\n favorite['status'] = 'added'\n saved_location = create_testing_saved_locations(user_id, test_id) \n location = get_testing_location_by_test_id(test_id)\n flash(f'Testing Location {location.alternate_name} saved to profile!')\n return jsonify(favorite)\n\n else:\n flash('Please login to save a location!')\n\n except Exception as e:\n msg = f\"Error. Tried adding {test_id} to db failed: \\n {e}.\"\n return jsonify(msg) \n\n return jsonify('Success!')",
"def FoodCheckIn(sc, event):\n channel = sc.api_call('channels.info', channel=event['channel'])\n food = event['text'][9:]\n if food:\n if 'pizza' in food:\n sc.api_call('reactions.add', as_user='true', channel=event['channel'],\n timestamp=event['ts'], name='pizza')\n user = sc.api_call('users.info', user=event['user'])\n db = pymysql.connect(host='localhost', user='pizzabot', db='pizzachat')\n cursor = db.cursor()\n query = 'INSERT INTO foodlist (who, what) VALUES (%s, %s)'\n cursor.execute(query, (user['user']['name'], food.encode('utf-8')))\n db.commit()\n db.close()",
"def test_user_own_recipes(self):\n\n recipe1 = Recipe(uri=\"testuri\", name=\"testname\", image_url=\"test_image_url\", user_id=self.uid)\n recipe2 = Recipe(uri=\"testuri2\", name=\"testname2\", image_url=\"test_image_url2\")\n\n db.session.add_all([recipe1, recipe2])\n db.session.commit()\n\n self.assertEqual(recipe1.user_id, self.uid)\n self.assertNotEqual(recipe2.user_id, self.uid)\n self.assertEqual(recipe2.user_id, None)",
"def test_create_ingredient_successful(self):\n payload = {'name':'Cabbage'}\n self.client.post(INGREDIENTS_URL, payload)\n exists = Ingredient.objects.all().filter(user=self.user, name=payload['name']).exists\n self.assertTrue(exists)",
"def test_add_remove_from_wishlist(self):\n url = reverse('add-to-wishlist')\n data = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id\n }\n\n add = self.client.post(url, data, format='json')\n self.assertEqual(True, add.data['value'])\n\n remove = self.client.post(url, data, format='json')\n self.assertEqual(False, remove.data['value'])",
"def favorited(self: Article, request: Request):\n if not request.user:\n return False\n\n if self in request.user.favorites:\n return True\n\n return False",
"def setUp(self):\n self.prod_1 = Product.objects.create(\n pk=1,\n ean='3350033118072',\n name='test 1',\n category='cat 1',\n image='product_default.png',\n nutriscore='u'\n )\n\n self.user_1 = User.objects.create_user(\n pk=1,\n username='Fav Models Unit Test 1',\n email='boggusmail@boggusmail.net'\n )\n\n self.fav_1 = Favourite.objects.create(\n pk=1,\n date_added='2019-12-20 09:00:00',\n user=self.user_1,\n product=self.prod_1\n )",
"def favourite(self, favourite):\n\n self._favourite = favourite",
"def add_fav_drinks(self, user_id, drinks): \n assert type(user_id) == str\n assert type(drinks) == list\n\n fav_drinks = self.get_fav_drinks(user_id)\n user_check = self.users.get_user_name(user_id)\n drinks_check = [self.drinks.get_drinks_by_flavor_and_type(d.get('flavor'), d.get('type')) for d in drinks]\n\n # make sure that at least one drink exists in the list\n if all(x is None for x in drinks_check):\n print(\"All drinks provided do not exist. We will not add favorite drinks since one of the drinks must already exist.\")\n \n # user does not exist\n elif user_check is None: \n print(\"User Id {} does not exist.\".format(user_id))\n \n # add fav drinks\n else : \n # user has existing fav drinks\n if fav_drinks is not None:\n for d in drinks:\n # add the drink if it does not exist \n drink_id = self.drinks.add_drink(d.get('type'), d.get('flavor'))\n fav_drinks.append(drink_id)\n # user has no existing fav drinks\n else :\n ids = []\n for d in drinks:\n # add the drink if it does not exist \n ids.append(self.drinks.add_drink(d.get('type'), d.get('flavor')))\n\n fd_id = self.__generate_id()\n self.favorite_drinks.append({\"id\": fd_id, \"user_id\": user_id, \"drink_id\": ids})",
"def test_create_ingredient(self):\n\n ingredient_payload = {'name': 'Test Ingredient'}\n self.client.post(URL_INGREDIENTS, ingredient_payload)\n\n is_ingredient_created = Ingredient.objects.filter(\n user=self.user,\n name=ingredient_payload['name']\n ).exists()\n\n self.assertTrue(is_ingredient_created)",
"def toggle_favorite(self, user, article, is_favoriting):\n if user not in article.favorited_by.all() and is_favoriting:\n article.favorited_by.add(user)\n if user in article.favorited_by.all() and not is_favoriting:\n article.favorited_by.remove(user)\n article.favoritesCount = article.favorited_by.all().count()\n article.save()"
] | [
"0.72114295",
"0.6636154",
"0.6632463",
"0.65658706",
"0.64831656",
"0.6482696",
"0.6476188",
"0.64745235",
"0.6445702",
"0.6381491",
"0.6374218",
"0.6319528",
"0.6261317",
"0.6236881",
"0.6179164",
"0.6172634",
"0.6163744",
"0.6161769",
"0.6146276",
"0.6124457",
"0.60840327",
"0.6082842",
"0.6063424",
"0.6043306",
"0.6040604",
"0.60332847",
"0.60214037",
"0.6017845",
"0.5996445",
"0.59783953"
] | 0.78729564 | 0 |
Get the EOL mode map | def EOLModeMap():
# Maintenance Note: ints must be kept in sync with EDSTC_EOL_* in edstc
    return { EOL_MODE_CR : _("Old Macintosh (\\r)"),
EOL_MODE_LF : _("Unix (\\n)"),
EOL_MODE_CRLF : _("Windows (\\r\\n)")} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_eol_for_open(self) -> str:\n map = {\n EOLTypes.CRLF: WINDOWS_EOL,\n EOLTypes.LF: UNIX_EOL,\n EOLTypes.NATIVE: linesep,\n }\n\n return map[self]",
"def get_modes(self):\n return [i for i, j in enumerate(self._modemap._map) if j is not None]",
"def getModeLookupTable(self):\n mode_table = []\n header = None\n for line in open(PublicTransit.MODE_LOOKUP_FILE_NAME):\n line = line.strip()\n if len(line) == 0: \n continue\n line = map(str.strip,line.split(\",\"))\n if header is None:\n header = line\n #CPT_AGENCYID\tAGENCYNAME\tCPT_MODE\tSCH_ROUTEDESIGNATOR\tMODECODE\tMODEGROUP\n continue\n data = {}\n for i in range(len(line)):\n data[header[i]] = line[i]\n mode_table.append(data)\n return mode_table",
"def _get_modes(self):\n return self.__modes",
"def get_modes(self, code_block):\r\n # FUCK YOU INDEX ERRORS, LIST COMPS, AND EVEN YOU LAMBDAS I DON'T NEED PRETTY\r\n # 0 = pos mode\r\n # 1 = imm mode\r\n modes, mode_codes = [0, 0], list(reversed(str(code_block[0])))[2:]\r\n x = 0\r\n for mode in mode_codes:\r\n modes[x] = int(mode)\r\n x += 1\r\n print('Get modes: ')\r\n print(modes)\r\n return modes",
"def get_modes(self):\n return self.circuit.get_modes()",
"def modes(self, exp_id: int) -> List[str]:\n return list(self.state[exp_id].keys())",
"def __mode_modesetid(self, mode):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tix = val.index(mode)\n\t\t\tif ix is not None:\n\t\t\t\treturn key, ix",
"def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if \"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError('Cannot locate modes in output.dat file.')\n\n # Barring the first (and sometimes last) line, dat file has 6 values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.molecule['input'])) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n\n all_modes = [float(val) for val in structures]\n\n return array(all_modes)",
"def _get_applicable_modes(command):\n mode_dict = {}\n _add_applicable_modes(command, mode_dict)\n return mode_dict.keys()",
"def parse_session_mode_and_map(log_data):\n try:\n match = search(\n r\"<\\d{2}:\\d{2}> [^d]* Loading level \\w+\\/(\\w+), \\w+ (\\w+)\",\n log_data)\n line_map, line_mode = match.groups()\n return (line_mode, line_map)\n except Exception:\n print(\"Something is wrong with the log file!\")",
"def modes(self):\n return np.hstack(tuple(self.operator.modes))",
"def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open(\"output.dat\", \"r\") as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if \"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError(\"Cannot locate modes in output.dat file.\")\n\n # Barring the first (and sometimes last) line, dat file has 6 values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.atoms)) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += (\n lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n )\n\n all_modes = [float(val) for val in structures]\n\n return np.array(all_modes)",
"def __convertEOL(self):\n aw = self.activeWindow()\n aw.convertEols(aw.eolMode())",
"def get_modes(self):\n modes = set()\n for er in self.exercise_recordings:\n if er.mode not in modes:\n modes.add(er.mode)\n return list(modes)",
"def modes(self) -> List[str]:\n return [m.name for m in self._modes]",
"def GetPackageModes(self):\n return self._modes",
"def values(self):\n return self._modes.values()",
"def get_focus_mode_names(self):\n names = []\n for focus_mode in self.focus_modes:\n names.append(focus_mode['modeName'])\n return names",
"def _modes(self):\n answer = []\n for i in dir(self):\n if i.startswith('handle_'):\n answer.append(i.replace('handle_', ''))\n return answer",
"def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)",
"def common_mode(self):\n return self._common_mode",
"def common_mode(self):\n return self._common_mode",
"def getmode(self, mode):\r\n modes = {}\r\n # core modes\r\n for m, (basemode, basetype, bands) in _MODEINFO.items():\r\n modes[m] = ModeDescriptor(m, bands, basemode, basetype)\r\n # extra experimental modes\r\n modes[\"RGBa\"] = ModeDescriptor(\"RGBa\",\r\n (\"R\", \"G\", \"B\", \"a\"), \"RGB\", \"L\")\r\n modes[\"LA\"] = ModeDescriptor(\"LA\", (\"L\", \"A\"), \"L\", \"L\")\r\n modes[\"La\"] = ModeDescriptor(\"La\", (\"L\", \"a\"), \"L\", \"L\")\r\n modes[\"PA\"] = ModeDescriptor(\"PA\", (\"P\", \"A\"), \"RGB\", \"L\")\r\n # mapping modes\r\n modes[\"I;16\"] = ModeDescriptor(\"I;16\", \"I\", \"L\", \"L\")\r\n modes[\"I;16L\"] = ModeDescriptor(\"I;16L\", \"I\", \"L\", \"L\")\r\n modes[\"I;16B\"] = ModeDescriptor(\"I;16B\", \"I\", \"L\", \"L\")\r\n # set global mode cache atomically\r\n _modes = modes\r\n return _modes[mode]",
"def line_styles (self):\n return self._line_styles",
"def get_map(self):\n\n self.mp = defaultdict(lambda : ord('x'))\n y, x = 0, 0\n while True:\n cond, output = self.ic()\n\n if cond: break\n # New row of the print out\n if output == 10:\n y += 1\n x = 0\n # Assign the value to the map\n else:\n self.mp[y,x] = output\n x += 1\n \n return self.mp",
"def get_keymap(self):\n return self.keymap",
"def keys(self):\n return self._modes.keys()",
"def _get_mode(self):\n raise NotImplementedError",
"def get_all_color_modes(self):\n return self._all_color_modes"
] | [
"0.63044417",
"0.6147848",
"0.5959693",
"0.5748074",
"0.57146144",
"0.5593236",
"0.54169565",
"0.54102844",
"0.53679585",
"0.5359293",
"0.5346539",
"0.5331634",
"0.53012353",
"0.5280056",
"0.52593654",
"0.52485204",
"0.51640564",
"0.5128611",
"0.51207393",
"0.51181734",
"0.5070815",
"0.50445783",
"0.50445783",
"0.5026303",
"0.5024813",
"0.5011286",
"0.4998007",
"0.4982108",
"0.49743688",
"0.49624553"
] | 0.808408 | 0 |
Loops through each page within a single PDB and sums up the stats of each page to arrive at the overall total | def analyze(directory, pdf_file, doc_type):
total_redaction_count = 0
total_redacted_text_area = 0
total_estimated_text_area = 0
total_estimated_num_words_redacted = 0
# Split the pdb (which is a pdf file) into individual jpgs.
redaction_module.pdf_to_jpg(directory, pdf_file)
os.chdir(directory)
for jpg_file in os.listdir(directory):
# Iterating through each page of the PDB
if jpg_file.endswith(".jpg"):
[redaction_count, redacted_text_area, estimated_text_area, estimated_num_words_redacted, potential, text_potential, type1, type2, type3] = redaction_module.image_processing(jpg_file, doc_type)
total_redaction_count += redaction_count
total_redacted_text_area += redacted_text_area
total_estimated_text_area += estimated_text_area
total_estimated_num_words_redacted += estimated_num_words_redacted
# Crucial clean-up of jpg files (Note: If files are not removed, code will NOT work properly).
os.remove(jpg_file)
# Now that we've gone through each page, we need to calculate the stats for the document.
if total_estimated_text_area != 0:
total_percent_text_redacted = float(total_redacted_text_area / total_estimated_text_area)
else:
total_percent_text_redacted = 0
data = []
# open csv file and write the stats in a single row representing the document.
with open('output.csv', mode='a+') as output:
output_writer = csv.writer(output, delimiter=',')
row = [pdf_file, total_redaction_count, total_percent_text_redacted, total_estimated_num_words_redacted]
data.append(row)
print(tabulate(data, headers=[" ", " ", " ", " ", " "]))
output_writer.writerow(row)
output.close() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def loop_example():\n\n totals = []\n\n for row in poke_stats:\n totals.append(sum(row))\n \n return(totals)",
"def stats_page():\n import alltheitems.stats\n return alltheitems.stats.index()",
"def calculate_agrigate(self):\n self.total = 0.0\n for rec in self.data:\n self.total = self.total + rec[\"value\"]\n\n self.agrigate_data = {\n \"site\": self.site,\n \"utc\": self.timestamp_utc,\n \"local\": self.timestamp_local,\n \"tag\": \"TOTAL\",\n \"value\": round(self.total, 3)}\n self.data.append(self.agrigate_data)",
"def scrape_central(page):\n soup = BeautifulSoup(page, 'html.parser')\n table = soup.find(\"table\", {\"class\" : \"ez1\"})\n rows = table.findAll('tr')\n page = int(table.find('tr', {'class': 'black'}).span.text)\n\n data_page = []\n for row in rows[1:]:\n item = {}\n cols = row.findAll('td')\n\n if len(cols) == 38:\n item['page'] = page\n item['state'] = cols[14].text.strip()\n item['district'] = cols[17].text.strip()\n item['village'] = cols[20].text.strip()\n item['proponent'] = cols[35].text.strip()\n item['proposal_no'] = cols[4].text.strip()\n item['file_no'] = cols[7].text.strip()\n item['proposal_name'] = cols[10].text.strip()\n item['sector'] = cols[34].text.strip()\n item['date_tor_apply'] = cols[24].text.strip()\n item['date_tor_granted'] = cols[27].text.strip()\n item['date_ec_receipt'] = cols[24].text.strip()\n item['date_ec_granted'] = cols[33].text.strip()\n clearance = cols[37].findAll('img', {'src': 'images/ec.png'})\n tor = cols[37].findAll('img', {'src': 'images/tor.png'})\n pfr = cols[37].findAll('img', {'src': 'images/pfr.png'})\n forms = cols[37].findAll('img', {'src': 'images/forms.png'})\n com = cols[37].findAll('img', {'src': 'images/com.png'})\n mon = cols[37].findAll('img', {'src': 'images/mon.png'})\n add = cols[37].findAll('img', {'src': 'images/add.png'})\n item['clearance_report'] = len(clearance)\n item['tor_report'] = len(tor)\n item['pf_report'] = len(pfr)\n item['form1'] = len(forms)\n item['compliance_report'] = len(com)\n item['monitor_report'] = len(mon)\n item['additional_report'] = len(add)\n data_page.append(item)\n \n\n if len(cols) == 29:\n item['page'] = page\n item['state'] = cols[14].text.strip()\n item['district'] = cols[17].text.strip()\n item['village'] = cols[20].text.strip()\n item['proponent'] = cols[26].text.strip()\n item['proposal_no'] = cols[4].text.strip()\n item['file_no'] = cols[7].text.strip()\n item['proposal_name'] = cols[10].text.strip()\n item['sector'] = cols[25].text.strip()\n item['date_tor_apply'] = None\n item['date_tor_granted'] = None\n item['date_ec_receipt'] = None\n item['date_ec_granted'] = cols[24].text.strip()\n clearance = cols[28].findAll('img', {'src': 'images/ec.png'})\n tor = cols[28].findAll('img', {'src': 'images/tor.png'})\n pfr = cols[28].findAll('img', {'src': 'images/pfr.png'})\n forms = cols[28].findAll('img', {'src': 'images/forms.png'})\n com = cols[28].findAll('img', {'src': 'images/com.png'})\n mon = cols[28].findAll('img', {'src': 'images/mon.png'})\n add = cols[28].findAll('img', {'src': 'images/add.png'})\n item['clearance_report'] = len(clearance)\n item['tor_report'] = len(tor)\n item['pf_report'] = len(pfr)\n item['form1'] = len(forms)\n item['compliance_report'] = len(com)\n item['monitor_report'] = len(mon)\n item['additional_report'] = len(add)\n data_page.append(item)\n \n return data_page",
"def main():\n href_list = fundlist.get_fund_list()\n\n single_values = None\n asset_allocations = None\n geo_allocations = None\n sector_allocations = None\n top10_holdings = None\n\n for href in href_list:\n url = 'http://idata.fundata.com' + href\n fund_profile = FundProfileScraper(url)\n\n value_dict = fund_profile.scrape_all_single_value()\n if single_values is None:\n single_values = pd.DataFrame([value_dict.values()],\n columns=value_dict.keys())\n else:\n temp_df = pd.DataFrame(value_dict.values(),\n columns=value_dict.keys())\n single_values.append(temp_df)\n\n asset_allocation_list = fund_profile.scrape_asset_allocation()\n allocations_with_href = [[href, asset_class]\n for asset_class in asset_allocation_list]\n if asset_allocations is None:\n asset_allocations = pd.DataFrame(\n allocations_with_href,\n columns=['href', 'asset_allocation']\n )\n else:\n temp_df = pd.DataFrame(\n allocations_with_href,\n columns=['href', 'asset_allocation']\n )\n asset_allocations.append(temp_df)\n\n\n geo_allocations_list = fund_profile.scrape_geo_allocation()\n geo_allocations_href = [[href, geo_class]\n for geo_class in geo_allocations_list]\n if geo_allocations is None:\n geo_allocations = pd.DataFrame(\n geo_allocations_href,\n columns=['href', 'geo_allocation']\n )\n else:\n temp_df = pd.DataFrame(\n geo_allocations_href,\n columns=['href', 'geo_allocation']\n )\n geo_allocations.append(temp_df)\n\n sector_allocations_list = fund_profile.scrape_sector_allocation()\n sector_allocations_href = [[href, sector_class]\n for sector_class in sector_allocations_list]\n if sector_allocations is None:\n sector_allocations = pd.DataFrame(\n sector_allocations_href,\n columns=['href', 'sector_allocation']\n )\n else:\n temp_df = pd.DataFrame(\n sector_allocations_href,\n columns=['href', 'sector_allocation']\n )\n sector_allocations.append(temp_df)\n\n top10_holding_list = fund_profile.scrape_top10_holdings()\n top10_holding_href = [[href, holding]\n for holding in top10_holding_list]\n if top10_holdings is None:\n top10_holdings = pd.DataFrame(\n top10_holding_href,\n columns=['href', 'holding']\n )\n else:\n temp_df = pd.DataFrame(\n top10_holding_href,\n columns=['href', 'holding']\n )\n top10_holdings.append(temp_df)\n\n time.sleep(randint(1, 5))\n\n single_values.to_pickle('./single_values.pkl')\n asset_allocations.to_pickle('./asset_allocations.pkl')\n geo_allocations.to_pickle('./geo_allocations.pkl')\n sector_allocations.to_pickle('sector_allocations.pkl')\n top10_holdings.to_pickle('top10_holdings.pkl')",
"def dataExtract(queryResults):\n days = ['MondayCollect',\n 'TuesdayCollect',\n 'WednesdayCollect',\n 'ThursdayCollect',\n 'FridayCollect',\n 'SaturdayCollect',\n 'SundayCollect']\n\n #counting the instances of bin collections\n parkCount = 0\n roadingCount = 0\n otherCount = 0\n\n #output totals of bin collections\n parkOutput = []\n roadingOutput = []\n otherOutput = []\n \n #iterate over each day\n for day in days:\n \n #iterate over the number of bins\n for i in range(len(queryResults)):\n \n #check if the bin was collected on the day...\n if str(queryResults[i]['attributes'][day]).strip().lower() == 'yes':\n \n #unknown formatting issue with the data, these lines fix it\n strResult = str(queryResults[i]['attributes']['Owner'])\n strResultForm = strResult.lower().strip()\n \n #update the counts if True\n if strResultForm == 'roading':\n roadingCount += 1\n elif strResultForm == 'parks':\n parkCount += 1\n elif strResultForm == 'private':\n otherCount += 1\n else:\n otherCount +=1\n\n #print \"Day: {} \\nparkCount: {} \\nroadingCount: {} \\notherCount: {} \\n\\n\".format(day,parkCount,roadingCount,otherCount)\n \n parkOutput.append(parkCount)\n roadingOutput.append(roadingCount)\n otherOutput.append(otherCount)\n \n parkCount = 0\n roadingCount =0\n otherCount =0\n \n return parkOutput,roadingOutput,otherOutput",
"def assemble_stats(lma_sum, mma_sum, hma_sum, peer_lma_sum, peer_mma_sum, peer_hma_sum):\n lma_pct = 0.0\n mma_pct = 0.0\n hma_pct = 0.0\n\n peer_lma_pct = 0.0\n peer_mma_pct = 0.0\n peer_hma_pct = 0.0\n\n stats = {}\n\n target_lar_total = lma_sum + mma_sum + hma_sum\n if target_lar_total:\n lma_pct = round(1.0 * lma_sum / target_lar_total, 3)\n mma_pct = round(1.0 * mma_sum / target_lar_total, 3)\n hma_pct = round(1.0 * hma_sum / target_lar_total, 3)\n maj_pct = round(mma_pct + hma_pct, 3)\n stats.update({\n 'lma': lma_sum, \n 'lma_pct': lma_pct, \n 'mma': mma_sum,\n 'mma_pct': mma_pct,\n 'hma': hma_sum,\n 'hma_pct': hma_pct,\n 'maj_pct': maj_pct,\n 'lar_total': target_lar_total\n })\n else:\n stats.update({\n 'lar_total': 0,\n 'lma': 0, \n 'lma_pct': 0, \n 'mma': 0,\n 'mma_pct': 0,\n 'hma': 0,\n 'hma_pct': 0\n })\n #assemble peer data\n peer_lar_total = peer_lma_sum + peer_mma_sum + peer_hma_sum\n if peer_lar_total:\n peer_lma_pct = round(1.0 * peer_lma_sum / peer_lar_total, 3)\n peer_mma_pct = round(1.0 * peer_mma_sum / peer_lar_total, 3)\n peer_hma_pct = round(1.0 * peer_hma_sum / peer_lar_total, 3)\n peer_maj_pct = round(peer_mma_pct + peer_hma_pct, 3)\n stats.update({\n 'peer_lma': peer_lma_sum, \n 'peer_lma_pct': peer_lma_pct, \n 'peer_mma': peer_mma_sum,\n 'peer_mma_pct': peer_mma_pct,\n 'peer_hma': peer_hma_sum,\n 'peer_hma_pct': peer_hma_pct,\n 'peer_maj_pct': peer_maj_pct,\n 'peer_lar_total': peer_lar_total\n })\n else:\n stats.update({\n 'peer_lma': 0,\n 'peer_lma_pct': 0, \n 'peer_mma': 0, \n 'peer_mma_pct': 0,\n 'peer_hma': 0,\n 'peer_hma_pct': 0,\n 'peer_lar_total': 0\n })\n odds_lma = odds_ratio(lma_pct, peer_lma_pct)\n odds_mma = odds_ratio(mma_pct, peer_mma_pct)\n odds_hma = odds_ratio(hma_pct, peer_hma_pct)\n odds_maj = odds_ratio(mma_pct+hma_pct, peer_mma_pct+peer_hma_pct)\n stats.update({\n 'odds_lma':odds_lma,\n 'odds_mma':odds_mma,\n 'odds_hma':odds_hma,\n 'odds_maj':odds_maj\n })\n return stats",
"def getAllPageNumbers(self):\n\t\tfor subpage in self.subpages:\n\t\t\thtmlcontent = self.HttpHandler.getHtmlContentFromLink(subpage.link)\n\t\t\tsoupPage = BeautifulSoup(htmlcontent, \"html.parser\")\n\t\t\tsubpage.setNbrPages( self.getNbrPages(soupPage) )",
"def ExamineAllEvents(self, do_print):\n total = 0.0\n for purno in self.data:\n event = self.data[purno]\n randomcountry = event.keys()[0]\n randomrow = event[randomcountry]\n total += self.GetTotal(randomrow)\n if do_print:\n print purno, randomrow[0], randomrow[2], randomrow[6]\n for country in event:\n print \" %s: %.2f%%\" % (\n country, self.GetCountryPercentage(event[country], country) * 100)\n return total",
"def yield_stats(go_analysis):\n for i in xrange(go_analysis.nrow()):\n yield go_analysis[0][i], go_analysis[1][i], go_analysis[2][i], go_analysis[3][i], p_value_from_r(go_analysis[4][i]), p_value_from_r(go_analysis[5][i])",
"def print_numa_stats(numafiles):\n for numafile in numafiles:\n numafile.seek(0)\n node_id = int(numafile.name[numafile.name.find(\"/node/node\")+10:-9])\n ts = int(time.time())\n stats = dict(line.split() for line in numafile.read().splitlines())\n for stat, tag in (# hit: process wanted memory from this node and got it\n (\"numa_hit\", \"hit\"),\n # miss: process wanted another node and got it from\n # this one instead.\n (\"numa_miss\", \"miss\")):\n print (\"sys.numa.zoneallocs %d %s node=%d type=%s\"\n % (ts, stats[stat], node_id, tag))\n # Count this one as a separate metric because we can't sum up hit +\n # miss + foreign, this would result in double-counting of all misses.\n # See `zone_statistics' in the code of the kernel.\n # foreign: process wanted memory from this node but got it from\n # another node. So maybe this node is out of free pages.\n print (\"sys.numa.foreign_allocs %d %s node=%d\"\n % (ts, stats[\"numa_foreign\"], node_id))\n # When is memory allocated to a node that's local or remote to where\n # the process is running.\n for stat, tag in ((\"local_node\", \"local\"),\n (\"other_node\", \"remote\")):\n print (\"sys.numa.allocation %d %s node=%d type=%s\"\n % (ts, stats[stat], node_id, tag))\n # Pages successfully allocated with the interleave policy.\n print (\"sys.numa.interleave %d %s node=%d type=hit\"\n % (ts, stats[\"interleave_hit\"], node_id))",
"def eliminating_loop_example():\n\n totals_comp = [sum(row) for row in poke_stats]\n\n return(totals_comp)",
"def collectStat(self, thread):\n\t\t# update average page load time\n\t\tif self.updated_count == 0:\n\t\t\tself.average_time = thread.load_time\n\t\telse:\n\t\t\tself.average_time = (self.average_time * self.updated_count + thread.load_time) / (self.updated_count + 1)\n\t\t# update stitistics by HTTP code\n\t\tif thread.code not in self.code_statistics:\n\t\t\tself.code_statistics[thread.code] = 1 \n\t\telse:\n\t\t\tself.code_statistics[thread.code] += 1\n\t\t# update count of processed pages\n\t\tself.updated_count += 1",
"def total(self):\n return self._evaluate()['hits']['total']",
"def SumaryPagos(vj):\n\n vj.PagosVenta = {}\n vj.MontoCobros = 0.0 # Sumatoria de todos los pagos reallizados\n\n for idPago, row in vj.tbPagos.rows.items(): # Recorre todos los pagos\n vj.MontoCobros += row.cuc # Acumula los pago en cuc\n vj.MontoCobros +=vj.Cnv( row.cup, MD.Cup, MD.Cuc ) # Acumula los pago en cup (convertido a cuc)\n\n idVent = row.idVent # Id de la venta a la que pertenece el pago\n if idVent not in vj.PagosVenta: # Si no hay pago para la venta \n vj.PagosVenta[idVent] = [] # Crea una lista vacia\n\n vj.PagosVenta[idVent].append(idPago) # Agrega el pago a la venta",
"def _calc_stats(self):\n\n for res in self.rsts:\n _LOG.info(\"Calculate statistics for '%s'\", res.reportid)\n res.calc_stats(regexs=self._stats_colnames, funcnames=self._stats_funcs)",
"def compute_statistics(self):",
"def gather_all_profiles(year, month):\n page = 1\n urls = []\n\n print(\"{}-{} : Begin indexing.\".format(year, month))\n\n while (page > 0):\n urlstring = \"http://scamdigger.com/{}/{}/page/{}\".format(year,month,page) \n jitter = random.choice([0,1])\n try:\n urlhandle = urlopen(urlstring)\n urls += enumerate_profiles(urlhandle, page)\n # time.sleep(1+jitter)\n page += 1\n except:\n page = 0\n\n print(\"{}-{} : {} profiles\".format(year,month,len(urls)))\n\n for url in urls:\n uid = url[30:-1]\n outfile=PROFILES+os.sep+uid+'.json'\n jitter = random.choice([0,1])\n try:\n urlhandle = urlopen(url)\n scrape_profile(urlhandle, outfile, year, month)\n # time.sleep(1+jitter)\n except Exception as e:\n print(\"Exception when handling {}\".format(url))\n print(e)\n \n print(\"{}-{} : complete.\".format(year,month))",
"def totals_map():\n totals_map = [*map(sum,poke_stats)]\n\n return(totals_map)",
"def getStats(population, masterList):\n for team in population:\n for i in range(13): #13 are the number of roster spots?\n team.totHr += masterList[team.roster[i]].hr\n team.totAvg += masterList[team.roster[i]].avg\n team.totRuns += masterList[team.roster[i]].runs\n team.totSb += masterList[team.roster[i]].sb\n team.totRbi += masterList[team.roster[i]].rbi\n if i == 12:\n team.totAvg = team.totAvg / 13\n return population",
"def core_stats():\n data = get_tsv_dataset(os.path.join(DATA_DIR, TOTALS_FILE))\n if data is None:\n return make_response(jsonify({'error': 'Data could not be read'}), 500)\n # parse up so we can manipulate things.\n dataset = [int(x) for x in data]\n annual_sightings = sum(dataset)\n # for each 'month' (selection of x4\n monthly_sightings = []\n max_sightings = 0\n max_month = 0\n\n # grab each month's data into its own list for post processing.\n # also calculate some other numbers as we go.\n for i in range(0, len(dataset), 4):\n # select 4x data points.\n this_month = dataset[i:i + 4]\n total_sightings_this_month = sum(this_month)\n monthly_sightings.append(total_sightings_this_month)\n old_max = max_sightings\n max_sightings = max(max_sightings, total_sightings_this_month)\n if old_max < max_sightings:\n # it could be the 0th month.\n max_month = len(monthly_sightings)\n\n mean_monthly_sightings = mean(monthly_sightings)\n month_name = list(calendar.month_name)[max_month]\n return make_response(jsonify({'annual_sightings': annual_sightings,\n 'max_sightings': max_sightings,\n 'max_sighting_month': month_name,\n 'mean_monthly_sightings': mean_monthly_sightings}), 200)",
"def legacy_pagecounts(project, start, end,\n access_site='all-sites', granularity='daily'):\n project_arg = 'all-projects'\n if project != 'all-projects':\n project_arg = '{}.org'.format(project)\n args = PC_ARGS.format(project=project_arg,\n start=start,\n end=end,\n access_site=access_site,\n granularity=granularity)\n return __api__(PC_ENDPOINT, args)",
"def get_statistics(self):\n statistics = {\n 'entry': 0,\n 'bandwidth': 0,\n 'exit': 0,\n 'pages': 0\n }\n downloads = statistics.copy()\n \n portal_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_portal_state'\n )\n context_state = getMultiAdapter(\n (self.context, self.request), name=u'plone_context_state'\n )\n site = portal_state.portal()\n \n url = self.context.absolute_url().replace(site.absolute_url(), '')\n urls = []\n if url == '':\n url = '/'\n quoted_url = urllib.quote(url)\n \n urls.append(quoted_url)\n urls.append(quoted_url + '/view')\n canonical_url = urllib.quote(context_state.canonical_object_url())\n if canonical_url not in urls:\n urls.append(canonical_url)\n urls.append(canonical_url + '/view')\n\n query = 'SELECT * FROM statistics WHERE url IN %s' % str(tuple(urls))\n results = Session.execute(query).fetchall()\n if results:\n for row in results:\n for key in statistics.keys():\n statistics[key] = statistics[key] + int(row[key])\n\n results_dw = Session.execute(\n 'SELECT * FROM statistics WHERE url=\"%s/at_download%%\"' % quoted_url).fetchall()\n if results_dw:\n for row in rows_stat:\n for key in statistics.keys():\n downloads[key] = downloads[key] + int(row[key])\n statistics['downloads'] = downloads['pages']\n return statistics",
"def parsing_all_page(url):\n html_doc = get_html(url)\n# html_doc = get_html_local()\n page_count = get_html_count(html_doc)\n print 'All have find pages %d' % page_count\n\n projects = []\n\n for page in range(1, page_count + 1):\n print 'Parsing %d%%' % (page*100/page_count)\n\n url = BASE_URL + '?page=%d' % page\n projects.extend(process_page(url))\n\n return projects",
"def patrimony_total(self):\n pass",
"def run(self):\n with open(self.source_file) as file:\n for index, mem_access in enumerate(file):\n access_type = mem_access.split(' ')[0]\n address = int(mem_access.split(' ')[1], 16)\n self.page_table.query(address, access_type, index)\n return {\"memory_accesses\": self.mem_accesses,\n \"page_faults\": self.page_table.page_faults,\n \"writes_to_disk\": self.page_table.writes_to_disk}",
"def advancedStats():",
"def parse_pdfs():\n # get all of the pdf files in the dir\n pahopdffiles = [f for f in listdir(paho_raw_reports_dir) if isfile(join(paho_raw_reports_dir, f))]\n # set up a list to hold the data for all pdf files\n all_pdf_data = []\n # read in each pdf file\n for pahopdffile in pahopdffiles:\n try:\n logging.info(\"Now attempting to read in: \"+pahopdffile)\n fullfilepath = os.path.join(paho_raw_reports_dir, pahopdffile)\n tables = camelot.read_pdf(fullfilepath)\n # get the pandas dataframe from each pdf\n pdfdataframe = tables[0].df\n # ensure that this is a valid PAHO COVID19 report\n report_keywords = ['Cumulative','COVID-19','Americas'] \n if not all(x in pdfdataframe[0].iloc[0] for x in report_keywords):\n logging.error(pahopdffile+\" was not recognised as a normal PAHO pdf file. Skipping.\")\n continue\n # set up the list to hold the data for this file\n reportdata = []\n # create a variable to store the date of this report\n date = None\n # create a variable to store the last subregion seen\n subregion = None\n # PAHO has different formats for their tables, so we need to check the number of columns in the pdf\n numcolumns = len(pdfdataframe.columns)\n # get the row index for the last country\n lastcountryrowindex = pdfdataframe[1][pdfdataframe[1] == 'Total'].index[0]-1\n for rowindex,rowdata in pdfdataframe.iterrows():\n # set up variables to hold the data for the dict\n country_or_territory_name = None\n confirmed_cases = None\n probable_cases = None\n probable_deaths = None\n recovered = None\n percentage_increase_confirmed = None\n if numcolumns == 6:\n # this is the old format that they started with\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].replace('Cumulative suspected and confirmed COVID-19 cases reported by \\ncountries and territories in the Americas, as of ','')\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n if not date:\n raise RuntimeError(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n elif rowindex in range(4,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[1] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[1] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[1]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[2] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[2].replace(\",\",\"\"))\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[5]\n # store null data for all other fields that were not present in the old reports\n probable_deaths = None\n recovered = None\n percentage_increase_confirmed = None\n elif numcolumns == 9:\n # PAHO added in probable cases\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].split(\", as of \")[1]\n if \"\\n\" in rawdate:\n rawdate = rawdate.split(\"\\n\")[0]\n try:\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n except ValueError:\n logging.error(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n raise\n elif rowindex in range(4,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[1] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[1] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[1]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[2] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # there is a report where this column was merged for some reason\n if \"\\n\" in rowdata[2]:\n split_numbers = rowdata[2].split(\"\\n\")\n confirmed_cases = int(split_numbers[0].replace(\",\",\"\"))\n probable_cases = int(split_numbers[1].replace(\",\",\"\"))\n confirmed_deaths = int(split_numbers[2].replace(\",\",\"\"))\n probable_deaths = int(split_numbers[3].replace(\",\",\"\"))\n recovered = None\n percentage_increase_confirmed = float(rowdata[7].replace(\"%\",\"\"))\n transmission_type = rowdata[8]\n # continue with the next row for this broken report\n continue\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[2].replace(\",\",\"\"))\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_deaths = None\n else:\n # store this string\n probable_deaths = rowdata[5]\n if rowdata[6] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n recovered = None\n else:\n # store this string\n recovered = int(rowdata[6].replace(\",\",\"\"))\n if rowdata[7] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n percentage_increase_confirmed = None\n else:\n # store this string\n percentage_increase_confirmed = float(rowdata[7].replace(\"%\",\"\"))\n if rowdata[8] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[8]\n elif numcolumns == 10:\n # PAHO added in country ISO codes and special characters\n if rowindex == 0:\n # this row contains the date for this report\n rawdate = rowdata[0].split(\", as of \")[1]\n if \"\\n\" in rawdate:\n rawdate = rawdate.split(\"\\n\")[0]\n try:\n date = datetime.strptime(rawdate,\"%d %B %Y\")\n except ValueError:\n logging.error(\"Unable to determine the date of this report. Row 0 contained this data: \"+\n rowdata[0])\n raise\n elif rowindex in range(3,lastcountryrowindex+2):\n # all these rows contain data for countries/regions\n # so parse the useful data for each\n # some of these rows contain subtotals per region/territory\n if rowdata[0] != '':\n # store the name of the last seen subregion\n subregion = rowdata[0]\n if rowdata[2] == \"Subtotal\":\n # on the subtotal rows, store the name for the entire subregion\n country_or_territory_name = subregion\n elif rowdata[2] == \"Total\":\n # on the last row, store the name All Americas to represent the total\n country_or_territory_name = \"All Americas\"\n else:\n # else store the name for the specific country\n country_name = rowdata[2]\n # note that country names may also have special characters\n country_name = re.sub('[^A-Za-z0-9,()\\[\\] ]+', '', country_name)\n country_or_territory_name = country_name\n # for each of the other columns, check if empty, else store the data present in the cell\n if rowdata[3] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_cases = None\n else:\n # there is a report where this column was merged for some reason\n if \"\\n\" in rowdata[3]:\n split_numbers = rowdata[3].split(\"\\n\")\n confirmed_cases = int(split_numbers[0].replace(\",\",\"\"))\n probable_cases = int(split_numbers[1].replace(\",\",\"\"))\n confirmed_deaths = int(split_numbers[2].replace(\",\",\"\"))\n probable_deaths = int(split_numbers[3].replace(\",\",\"\"))\n recovered = None\n percentage_increase_confirmed = float(rowdata[8].replace(\"%\",\"\"))\n transmission_type = rowdata[9]\n # continue with the next row for this broken report\n continue\n else:\n # remove the comma and parse to an int\n confirmed_cases = int(rowdata[3].replace(\",\",\"\"))\n if rowdata[4] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_cases = None\n else:\n # remove the comma and parse to an int\n probable_cases = int(rowdata[4].replace(\",\",\"\"))\n if rowdata[5] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n confirmed_deaths = None\n else:\n # remove the comma and parse to an int\n confirmed_deaths = int(rowdata[5].replace(\",\",\"\"))\n if rowdata[6] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n probable_deaths = None\n else:\n # store this string\n probable_deaths = rowdata[6]\n if rowdata[7] == \"\":\n # none is used to replace NULL in the db. 
This represents an unknown quantity\n recovered = None\n else:\n # store this string\n recovered = int(rowdata[7].replace(\",\",\"\"))\n if rowdata[8] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n percentage_increase_confirmed = None\n else:\n # store this string\n percentage_increase_confirmed = float(rowdata[8].replace(\"%\",\"\"))\n if rowdata[9] == \"\":\n # none is used to replace NULL in the db. This represents an unknown quantity\n transmission_type = None\n else:\n # store this string\n transmission_type = rowdata[9]\n else:\n logging.error(\"Unrecognised number of columns in the pdf file. Skipping for now.\"+\n \"Check if the report format changed from PAHO.\")\n # if we were at least able to scrape the country or territory name, create a dict and add it to the list\n if country_or_territory_name is not None:\n # set up the dict to store each row of data\n reportdict = collections.OrderedDict()\n # add the values to the dict in the order that we want for the report\n reportdict['date'] = date\n reportdict['country_or_territory_name'] = country_or_territory_name\n reportdict['confirmed_cases'] = confirmed_cases\n reportdict['probable_cases'] = probable_cases\n reportdict['confirmed_deaths'] = confirmed_deaths\n reportdict['probable_deaths'] = probable_deaths\n reportdict['recovered'] = recovered\n reportdict['percentage_increase_confirmed'] = percentage_increase_confirmed\n reportdict['transmission_type'] = transmission_type\n # now add this dict to our list for this report/pdf\n reportdata.append(reportdict)\n # once we are done adding all data for this pdf, add this pdf report to the list of all reports\n # if the reportdata list is not empty\n if reportdata:\n all_pdf_data.append(reportdata)\n logging.info(\"Successfully parsed \"+pahopdffile)\n except Exception as exc:\n logging.exception(\"Problem found while parsing \"+pahopdffile)\n raise\n logging.info(\"Completed parsing all pdfs in folder.\")\n return all_pdf_data",
"def _addPageRatio(self, outlines, pageLabels):\n for i in range(0, len(outlines)):\n outline = outlines[i]\n if type(outline) == list:\n self._addPageRatio(outlines[i], pageLabels)\n continue\n elif not outline.has_key('/Page'):\n print (\"Error: outline has no key '/Page'\")\n sys.exit(-1)\n pageHeight = outline['/Page']['/MediaBox'][-1]\n idIndirect = outline.page.idnum\n if pageLabels.has_key(idIndirect):\n pageNum = pageLabels[idIndirect]\n else:\n print ('Error: Page corresponds to IndirectObject %d not Found' % idIndirect)\n sys.exit(-1)\n if outline.has_key('/Top'):\n top = outline['/Top']\n else:\n top = pageHeight\n if outline.has_key('/Zoom'):\n zoom = outline['/Zoom']\n else:\n zoom = 1\n outline = dict(outline)\n try:\n outline['/Ratio'] = pageNum + (1 - top / zoom / pageHeight)\n except:\n pass\n outlines[i] = outline",
"def _count_pages_pdf(self, bin_pdf):\n pages = 0\n for match in re.compile(r\"/Count\\s+(\\d+)\").finditer(bin_pdf):\n pages = int(match.group(1))\n return pages"
] | [
"0.5614058",
"0.5522709",
"0.5506824",
"0.5450965",
"0.5391519",
"0.5355411",
"0.531937",
"0.5312551",
"0.5290875",
"0.5287495",
"0.52719283",
"0.5268888",
"0.5259223",
"0.5246753",
"0.5223558",
"0.52189976",
"0.5212078",
"0.51717114",
"0.5152129",
"0.5145579",
"0.51427794",
"0.5131529",
"0.510768",
"0.5098244",
"0.5095091",
"0.5072014",
"0.5052193",
"0.50403297",
"0.5038493",
"0.5036697"
] | 0.56087923 | 1 |
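The PAHO report-parsing code in the negatives above applies one cell-parsing pattern over and over: an empty cell maps to None (so the database column can be NULL), and thousands separators or percent signs are stripped before casting. A minimal standalone sketch of that pattern (the helper name parse_cell is hypothetical, not from the source):

def parse_cell(raw, cast=int):
    # Empty cells stand in for NULL / unknown quantities.
    if raw == "":
        return None
    # Strip formatting characters before casting, e.g. "1,234" or "4.5%".
    return cast(raw.replace(",", "").replace("%", ""))

assert parse_cell("1,234") == 1234
assert parse_cell("4.5%", float) == 4.5
assert parse_cell("") is None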
Create a new text input instance. colorNames is a sequence of strings (each color must start with a different letter). | def __init__(self, colorNames):
self._lengthOfPattern = 0 # will later be queried from the user
self._palette = '' # initials for color choices, e.g., R for red
for color in colorNames:
self._palette += color[0].upper() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, colorNames):\n self._colorOptions = '' # initials for color choices\n for color in colorNames:\n self._colorOptions += color[0].upper()\n # following will be reset when startGame is called\n self._currentTurnNum = self._lengthOfPattern = self._maxNumberOfTurns = 0",
"def mkColor(self, name):\n known_attrs = [ 'font-family', 'font-style', 'font-weight', 'font-size', 'text-decoration', 'color', 'background-color' ]\n stack = []\n color = Color(name)\n for token in self.tokenizer:\n if token.text == \";\":\n stack[0].assert_symbol_name\n if stack[0].text not in known_attrs: raise Exception(\"%d:%d: Unknown color attribute %s\" % (stack[0].line, stack[0].col, stack[0].text))\n stack[1].must_be(\":\")\n stack[2].must_match(\"^\\w\", \"%d:%d: Expected a color attribute value instead of %s\" % (stack[2].line, stack[2].col, stack[2].text))\n color.attrs[stack[0].text] = stack[2].text\n stack = []\n elif token.text == \"}\":\n return color\n else:\n stack += [token]\n raise Exception(\"%d:%d: End-of-file reached while scanning color %s defined here.\" % (name.line, name.col, name.text))",
"def from_string(cls, text_color):\n\n a = 255\n try:\n r, g, b, a = text_color.replace('rgb(', '').replace(')', '').split(',')\n except ValueError:\n r, g, b = text_color.replace('rgb(', '').replace(')', '').split(',')\n\n return cls(int(r), int(g), int(b), int(a))",
"def test_color__name_str_arg(self):\n for name in (\"aquamarine3\", \"AQUAMARINE3\", \"AqUAmArIne3\"):\n color = pygame.Color(name)\n\n self.assertEqual(color.r, 102)\n self.assertEqual(color.g, 205)\n self.assertEqual(color.b, 170)\n self.assertEqual(color.a, 255)",
"def __init__(self, name, color):\n self.name = name\n self.color = color",
"def __init__(self, text, start, end, color, alpha=1):\n self.text = text\n self.start = start\n self.end = end\n self.color = color\n self.alpha = alpha",
"def test_is_valid_color_name(self):\n self.assertTrue(is_valid_color_name('black'))\n self.assertTrue(is_valid_color_name('red'))\n self.assertFalse(is_valid_color_name('#aabb11'))\n self.assertFalse(is_valid_color_name('bl(ack'))",
"def from_name (name_str):\n if name_str in colour_names:\n return Colour(*colour_names[name_str])\n raise KeyError(\"'%s' is not a recognized colour name\"%name_str)",
"def __init__(self, input_color, location, white_symbol, black_symbol):\n assert isinstance(input_color, Color)\n assert isinstance(location, Location)\n assert isinstance(white_symbol, str)\n assert isinstance(black_symbol, str)\n\n self.color = input_color\n self.location = location\n\n if self.color == color.white:\n self.symbol = white_symbol\n else:\n self.symbol = black_symbol",
"def __init__(self, red=Black.red, green=Black.green, blue=Black.blue):\n self.color = Color(red, green, blue)\n\n self.template = '\\ttextcolor = {textcolor};\\n'",
"def create(data):\n \n # init color\n color = Color(\n color_id = data.get('id'),\n name = data['name'],\n rgb = data['rgb'],\n is_trans = data['is_trans'])\n \n # get external names and IDs\n if 'external_ids' in data:\n for name, value in data['external_ids'].items():\n color.external_names[name] = [n for l in value['ext_descrs'] for n in l]\n color.external_ids[name] = value['ext_ids']\n \n return color",
"def create_color(cls, text_color: int, background_color: int) -> int:\n global _COLOR_COUNTER\n unicurses.init_pair(_COLOR_COUNTER, text_color, background_color)\n color = unicurses.color_pair(_COLOR_COUNTER)\n _COLOR_COUNTER += 1\n return color",
"def _colorstr(self, args):",
"def create_label(self, name, color):\n self.color = color #remove this line not needed\n json = None\n if name and color:\n data = {'name': name, 'color': color.strip('#')}\n # post url data\n # json = json resp\n return json",
"def test__TextInputStyle__name():\n for instance in TextInputStyle.INSTANCES.values():\n vampytest.assert_instance(instance.name, str)",
"def _makeColor(self, renderer, name, space, color):\n # assemble the arguments\n args = (renderer.literal(value) for value in color)\n # build and return the expression\n return renderer.set(name=name, value=renderer.call(func=space, args=args))",
"def color_text(text, color_name):\n\n if use_color():\n return colorama.Fore.__dict__[color_name.upper()] + text + colorama.Style.RESET_ALL\n else:\n return text",
"def from_str (s):\n try: \n return from_csv(s)\n except Exception: \n pass\n \n try: \n return from_hex(s)\n except Exception: \n pass\n\n try:\n return from_name(s)\n except Exception: \n pass\n\n raise ColourFormatError(\"'%s' is not a recognized colour string\"%s)",
"def _color(self, text, color_name=None, bold=False):\n\n if self.disable_color == True:\n return text\n \n if color_name == None:\n color_name = 'YELLOW'\n\n if color_name in self.colors:\n return '\\033[{0};{1}m{2}\\033[0m'.format(\n int(bold), self.colors.index(color_name) + 30, text)\n\n raise Exception('ERROR: \"{0}\" is not a valid color.\\n'.format(color_name))\n raise Exception('VALID COLORS: {0}.\\n'.format(', '.join(self.colors)))",
"def __selectColorName(self):\n editor = e5App().getObject(\"ViewManager\").activeWindow()\n if editor is None:\n return\n \n if editor.hasSelectedText():\n currColor = editor.selectedText()\n if currColor not in QColor.colorNames():\n E5MessageBox.critical(\n self.__ui,\n self.tr(\"Color String\"),\n self.tr(\n \"\"\"<p>The selected string <b>{0}</b> is not a\"\"\"\n \"\"\" valid color name. Aborting!</p>\"\"\")\n .format(currColor))\n return\n else:\n currColor = \"\"\n \n from ColorString.ColorSelectionDialog import ColorSelectionDialog\n dlg = ColorSelectionDialog(currColor, self.__ui)\n if dlg.exec_() == QDialog.Accepted:\n colorStr = dlg.getColor()\n editor.beginUndoAction()\n if editor.hasSelectedText():\n editor.replaceSelectedText(colorStr)\n else:\n line, index = editor.getCursorPosition()\n editor.insert(colorStr)\n editor.setCursorPosition(line, index + len(colorStr))\n editor.endUndoAction()",
"def NamedColour(*args, **kwargs):\n val = _gdi_.new_NamedColour(*args, **kwargs)\n return val",
"def importColors(colorlist):\n colordict=getColorDict()\n scribus.statusMessage(\"Defining new colors...\")\n scribus.progressTotal(len(colorlist))\n i=0\n for color in colorlist:\n name=color[0]\n c=color[1]\n m=color[2]\n y=color[3]\n k=color[4]\n while colordict.has_key(name):# check if color already exists - then add PREFIX to name\n name = PREFIX+name\n \n scribus.defineColor(name, c, m, y, k)\n i=i+1\n scribus.progressSet(i)",
"def __init__(self, name, hunger, color=\"Green\"):\r\n super().__init__(name, hunger)\r\n self._color = color",
"def text(self, str: str, x: int, y: int, colour: int, /) -> None:",
"def setColors(self, colors, indexes=None):\n colors = np.array(colors, np.float32)\n if indexes is None:\n # Change colors to the whole string\n self.allVertices['rgba'][:] = glm.vec4(colors)\n for item in self.colors:\n item[-1] = colors\n else:\n indexes = np.array(indexes, np.int32)\n assert len(colors) == len(indexes)\n # Adjust indexes\n off = 0\n j = 0\n for i, c in enumerate(self.text):\n if c in self.NO_GLYPH_CHARS:\n off += 1\n if i == indexes[j]:\n if j < len(indexes) - 1:\n j += 1\n break\n continue\n elif i < indexes[j]:\n continue\n else:\n self.allVertices['rgba'][\n 4 * (i - off):4 * (i - off + 1)] = colors[j]\n self.colors[i][-1] = colors[j]\n if j < len(indexes) - 1:\n j += 1\n else:\n break\n self.mesh.update()",
"def color(name):\n\tif name not in colors:\n\t\traise ValueError('Bad color %s' % repr(name))\n\treturn u'§' + colors[name]",
"def create_color(colorstr):\n\ttry:\n\t\treturn pygame.Color(colorstr[:7])\n\texcept TypeError:\n\t\tprint >>sys.stderr, \"Invalid color: \", colorstr[:7]\n\t\treturn pygame.Color(0,0,0,255)",
"def colored (string_, color, attrs):\n return string_",
"def __init__(self,name,value,*args,**kargs):\n color = colors.colorName(value)\n self.input = QtGui.QPushButton(color)\n InputItem.__init__(self,name,*args,**kargs)\n self.setValue(color)\n self.connect(self.input,QtCore.SIGNAL(\"clicked()\"),self.setColor)\n self.layout().insertWidget(1,self.input)",
"def class_colors(names):\r\n return {name: (\r\n random.randint(0, 255),\r\n random.randint(0, 255),\r\n random.randint(0, 255)) for name in names}"
] | [
"0.6373945",
"0.61426556",
"0.6063771",
"0.60201144",
"0.5951335",
"0.5880087",
"0.58047354",
"0.5626059",
"0.56203663",
"0.55802214",
"0.55743384",
"0.556991",
"0.5532483",
"0.5503226",
"0.5380365",
"0.5372865",
"0.53544044",
"0.53481925",
"0.5348108",
"0.5277353",
"0.5235065",
"0.5209213",
"0.51882446",
"0.51632047",
"0.51484644",
"0.5142133",
"0.51390916",
"0.51094925",
"0.5097588",
"0.50815237"
] | 0.7074272 | 0 |
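A quick illustration of the palette construction performed by the __init__ above (standalone sketch; the distinct-first-letter rule exists precisely so that each initial is unambiguous):

colorNames = ('red', 'green', 'blue', 'yellow')
palette = ''
for color in colorNames:
    palette += color[0].upper()
assert palette == 'RGBY'  # one uppercase initial per color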
Robustly prompt the user for an integer from small to large. | def _readInt(self, prompt, small, large):
    prompt = prompt + ' (from ' + str(small) + ' to ' + str(large) + ')? '
    answer = small - 1  # intentionally invalid
    while not small <= answer <= large:
        try:
            answer = int(input(prompt))
            if not small <= answer <= large:
                print('Integer must be from ' + str(small) + ' to ' + str(large) + '.')
        except ValueError:
            print('That is not a valid integer.')
    return answer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prompt_int(prompt):\n while True:\n try:\n return int(input(prompt))\n except ValueError as e:\n print('Provide an integer')",
"def input_int(question):\n while True:\n try:\n value = int(input(question))\n except (SyntaxError, NameError) as exception:\n print(\"Invalid entry. Try again.\")\n continue\n\n if value <= 0:\n print(\"Invalid entry. Try again.\")\n continue\n else:\n break\n\n return value",
"def integer_input( min_value=0, max_value=999, default=0, \n prompt=\"please type number and press ENTER\"):\n while True:\n raw = input(prompt)\n if not raw.isdigit():\n print(\"please enter a number\")\n continue\n raw = int(raw)\n if min_value <= raw <= max_value:\n return raw\n print(\"please enter value between {} and {}\".format(min_value,\n max_value))",
"def get_integer(prompt: str, error_prompt: str, limits_prompt: str, min_num: int = -float('inf'),\n max_num: int = float('inf')) -> int:\n while True:\n try:\n integer = int(input(prompt))\n if max_num >= integer >= min_num:\n return integer\n print(limits_prompt)\n except ValueError:\n print(error_prompt)",
"def ask_number (question,low,high):\n response = None\n while response not in range(low,high):\n response = int(input(question))\n return response",
"def get_integer_entry(prompt=\"0\", text=\"Input integer value\"):\n while True:\n data = input(\"{} [{}]:\".format(text, prompt))\n if data == \"\":\n data = prompt\n try:\n return abs(int(data))\n except ValueError as e:\n if debug: print(\"Value Error: {}\".format(e))\n continue",
"def enterInteger(CustomMessage=\"Please enter an integer: \",\r\n CustomErrorMessage=\"The input is not an integer, please try again...\",\r\n min=None, max=None):\r\n \r\n isInteger = False\r\n while not isInteger:\r\n try:\r\n number = int(input(CustomMessage))\r\n isInteger = True\r\n except ValueError:\r\n print(CustomErrorMessage)\r\n\r\n # range parameter\r\n if type(min) is int and type(max) is int:\r\n if min > max:\r\n raise ValueError(\"parameter 'min' is larger than 'max'\")\r\n else:\r\n while min > number or number > max:\r\n number = enterInteger(CustomMessage=\"Please input a number within \"+str(min)+\" to \"+str(max)+\": \")\r\n elif type(min) is int:\r\n while min > number:\r\n number = enterInteger(CustomMessage=\"Please input a number larger than \" + str(min) + \": \")\r\n elif type(max) is int:\r\n while number > max:\r\n number = enterInteger(CustomMessage=\"Please input a number smaller than \" + str(max) + \": \")\r\n\r\n return number",
"def get_raw_input() -> int:\n return int(input(\"> \"))",
"def get_positive_int(prompt):\n while True:\n n = int(input(prompt), 10)\n if n > 0:\n break\n \n return n",
"def Demo():\n print(\"Users input:\", GetInteger())\n print(\"Users input:\", GetInteger(lowerbound=-3, upperbound=10))\n input(\"Please press <Enter> to exit the demo.\")",
"def ask_number(question, low, high):\n response = None\n while response not in range (low, high):\n response = int(input(question))\n return response",
"def prompt_number(prompt, low_limit = 1, high_limit = 65535):\n while True:\n try:\n response = int(prompt_base(prompt))\n if low_limit <= response <= high_limit:\n return response\n except:\n pass",
"def get_int_input_constrained(prompt, value_min, value_max, value_default):\n\n input_value = 0\n while input_value < 1:\n txt = input(prompt)\n try:\n input_value = min(max(int(txt), value_min), value_max)\n except ValueError:\n input_value = value_default\n\n return (True, input_value)",
"def ask_number(question, low, high):\n response = None\n while response not in range(low, high):\n response = int(input(question))\n return response",
"def ask_number(question, low, high):\n response = None\n while response not in range(low, high):\n response = int(input(question))\n return response",
"def _int_input_in_range(self, print_out, range_):\n try:\n i = int(input(print_out))\n assert range_[0] <= i <= range_[1]\n return i\n except AssertionError:\n print('Please, enter a vaild number')\n return None\n except ValueError:\n print('Please, enter a number not a string')\n return None",
"def secure_input(self, minimum, maximum):\n wrong_input = True\n while wrong_input:\n while True:\n try:\n choice = int(input())\n break\n except ValueError:\n print(\"choisissez un chiffre qui vous est proposé dans la liste plus haut\")\n if choice < minimum or choice > maximum:\n print(\"choisissez un chiffre qui vous est proposé dans la liste plus haut\")\n else:\n wrong_input = False\n return choice",
"def get_num(*, prompt='Number? '):\n num = 0\n while True:\n try:\n num = int(input(prompt))\n except ValueError:\n print('Was that a number? Try again!')\n continue\n else:\n break\n return num",
"def useti(self, prompt=None, default=None):\n \n i = 0\n abak = copy(default) # Backup our default value\n\n a = abak\n while(i<self.maxTries):\n tmp = self.uset(prompt,default)\n try:\n a = float(tmp)\n a = int(a)\n i = self.maxTries # preload failure\n except:\n # Print warning\n print\n print \" WARNING: Invalid Entry. Please enter an integer!!\"\n print \n # reload the default\n a = abak\n i = i+1\n \n return(a)",
"def get_int(self):\n while True:\n try:\n choice = int(input(\"Choose: \"))\n if 1 <= choice <= len(self.menu):\n return choice\n print(\"Invalid choice.\")\n except (NameError,ValueError, TypeError,SyntaxError):\n print(\"That was not a number, genious.... :(\")",
"def get_user_input(prompt):\n while True:\n user_input = input(prompt)\n try:\n tmp = int(user_input)\n return tmp\n except ValueError:\n print('Not a number')",
"def get_num(prompt='Number? '):\n _num = 0\n while True:\n try:\n _num = int(input(prompt))\n except ValueError:\n print('Was that a number? Try again!')\n continue\n else:\n break\n return _num",
"def ask_number(low, high, tries):\n the_number = None\n while the_number not in range(low, high):\n the_number = int(input(\"Enter a number between 1-100: \"))\n return the_number\n print(\"The computer has\", tries, \"tries to guess your number\\n\")",
"def sanitized_int_input(s: str) -> int:\n\n v = input(s)\n if is_convertible_to_int(v):\n return int(v)\n else:\n print(\"There was an error, please enter a number.\")\n return sanitized_int_input(s)",
"def getNumber(prompt):\n output = input(prompt)\n if output.lower() == 'exit':\n return -1\n while output.isdigit() == False or int(output) > 9 or int(output) < 1:\n output = input(prompt)\n return int(output)",
"def check_user_input_if_integer(user_input):\n integer_input = ''\n while not integer_input:\n try:\n integer_input = int(user_input)\n except ValueError:\n logging.warn('only integer number accepted')\n user_input = input('enter a number: ')\n\n return integer_input",
"def user_selection(num, text):\n lst = list(range(1,num+1))\n answer= 0\n while answer not in lst:\n try:\n answer = int(input(text))\n \n if answer not in range(1,num+1):\n raise ValueError\n break\n except ValueError:\n print('Select a valid Number')\n\n return answer",
"def get_int():\n\twhile True:\n\t\ttry:\n\t\t\tX = int(raw_input())\n\t\t\tbreak\n\t\texcept:\n\t\t\tprint \"Could not convert input to integer\"\n\t\t\tcontinue\n\treturn X",
"def get_employee_input_int(message):\n while True:\n user_input = input('{}: '.format(message))\n\n # Type validation\n try:\n number = int(user_input)\n break\n except ValueError:\n print('You must enter a whole number.')\n continue\n\n #Range Validation\n # if valid_range and number not in valid_range:\n # _min = min(valid_range)\n # _max = max(valid_range)\n # print('You must enter a number from {} to {}.'.format(_min, _max))\n # continue\n return number",
"def get_number_input(msg=\"Provide a number: \", num_type=int):\n while True:\n try:\n num = num_type(input(msg))\n except ValueError:\n print(f\"Whoops!! Please enter a correct number of {num_type}!!\")\n continue\n else:\n print(\"Number accepted!!\")\n return num"
] | [
"0.7585431",
"0.74412924",
"0.7407625",
"0.7393844",
"0.7116075",
"0.7100335",
"0.7029069",
"0.7021394",
"0.69807756",
"0.69662374",
"0.69629014",
"0.69590944",
"0.6943949",
"0.69393766",
"0.69393766",
"0.690722",
"0.68896306",
"0.6840757",
"0.6829219",
"0.6797315",
"0.6749956",
"0.671389",
"0.6706469",
"0.67045593",
"0.67034775",
"0.6695541",
"0.66953796",
"0.668686",
"0.66528195",
"0.6650516"
] | 0.77045965 | 0 |
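To make the retry behaviour of _readInt testable without a console, here is a standalone, slightly adapted rewrite (a sketch, not the original API: answers are injected as a list instead of read from stdin):

def read_int(prompt, small, large, answers):
    # Same loop as _readInt above, with scripted user input.
    it = iter(answers)
    answer = small - 1  # intentionally invalid
    while not small <= answer <= large:
        try:
            answer = int(next(it))
            if not small <= answer <= large:
                print('Integer must be from ' + str(small) + ' to ' + str(large) + '.')
        except ValueError:
            print('That is not a valid integer.')
    return answer

assert read_int('pegs?', 1, 10, ['twelve', '12', '4']) == 4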
Ask the user how many pegs are in the secret pattern. | def queryLengthOfPattern(self):
self._lengthOfPattern = \
self._readInt('How many pegs are in the secret', 1, 10)
return self._lengthOfPattern | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def guessTheSecret():\n\tguess = int(input('Guess the number > '))\n\tglobal attempts\n\tcheck = False\n\twhile guess != secret_num:\n\t\tif guess < secret_num:\n\t\t\tprint('Your guess is too low')\n\t\telif guess > secret_num:\n\t\t\tprint('You guess to too high')\n\t\tguess = int(input('Guess again > '))\n\t\tattempts += 1\n\t\tif attempts >= 4:\n\t\t\tbreak\n\tif guess == secret_num:\n\t\treturn True",
"def main():\n password = input(\"Enter password that contains {} or more characters: \".format(MIN_LENGTH))\n while not is_valid_password(password):\n print(\"Invalid password!\")\n password = input(\"Enter password that contains {} or more characters: \".format(MIN_LENGTH))\n print(\"*\" * len(password))",
"def user_avoid_count():\n\tforbidden = input('Enter a string of forbidden letters.\\n> ')\n\tprint(len({w for w in word_set if avoids(w, forbidden)}))",
"def ask_user():\r\n password_lenght = 0\r\n while password_lenght == 0:\r\n try:\r\n password_lenght = int(input(\"How long password you want? Enter the number... \"))\r\n if password_lenght <= 0:\r\n print(\"Try to enter any number greater than 0...\")\r\n continue\r\n return password_lenght\r\n except Exception:\r\n continue",
"def pwd_len():\r\n while True:\r\n password_length = input('How much length for password u want ? Minimum length is 6 and Maximum length is 25 : ')\r\n try:\r\n password_length = int(password_length)\r\n if 6 <= password_length <= 25:\r\n break\r\n else:\r\n print('{} is not in range'.format(password_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(password_length))\r\n return password_length",
"def symbol_len(password_length):\r\n while True:\r\n symb_length = input('How much symbols you want in password? At least 1 : ')\r\n try:\r\n symb_length = int(symb_length)\r\n if 1 <= symb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(symb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(symb_length))\r\n return symb_length",
"def determine_attempts():\r\n #Inputs: # of attempts requested by user\r\n #Outputs: game gives number of attempts user selected before ending \r\n how_many_tries = int(raw_input(\"How many attempts do you want to answer a blank correctly before the answer is provided to you? Please provide a number, such as 2.\\n\"))\r\n attempts = how_many_tries\r\n number_of_tries = 5\r\n while how_many_tries < 1:\r\n print \"Please try again.\"\r\n determine_attempts\r\n attempts = attempts + 1\r\n if attempts == number_of_tries:\r\n break \r\n else:\r\n print \"Please read the paragraph below and provide the answers to fill in the numbered blanks.\\nYou will be given \" + str(attempts) + \" chances to enter the correct answer before it is provided to you.\\n\"\r\n return how_many_tries",
"def guesses():\n tries = 3\n print (\" You may choose your maximum number of tries per question.\"\n \"The default is 3.\")\n player_prompt = \" Please type in your preferred number: \"\n while tries > 0:\n user_choice = raw_input(player_prompt)\n if user_choice.isdigit():\n print \"\\n OK, {} {} allowed per blank. Here we go!\\n\".format(user_choice, how_many(user_choice))\n return int(user_choice)\n tries -= 1\n player_prompt = (\" Silly, that's not a valid number of guesses! {} more {}. \\n\"\n \" Try again: \").format(tries, how_many(tries))\n if tries == 0:\n print \" You defaulted your number of guesses, so 3 it is!\"\n return 3",
"def set_n_players(self):\n complain = \"\"\n while True:\n clear_output()\n try:\n self.n_players = int(\n input(f\"{complain}Please insert the number of players (between 2 to 6): \\n\"))\n if self.n_players >= 2 and self.n_players < 7:\n self.start_troops = 120 / self.n_players\n break\n elif self.n_players < 2:\n complain = \"Not enough players!\\n\"\n elif self.n_players >= 7:\n complain = \"Too many players!\\n\"\n except:\n complain = \"Not a valid number!\\n\"\n pass",
"def find_max_guesses():\n print(\"You'll get 5 guesses per problem!\")\n return 5",
"def countGuesses(hidden):\r\n guess = random.choice(range(0, 100)) # 0 to 99, inclusive\r\n numguesses = 1 # we just made one guess, above\r\n while guess != hidden:\r\n guess = random.choice(range(0, 100)) # guess again!\r\n numguesses += 1 # add one to our number of guesses\r\n return numguesses",
"async def numguess(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.numguess', extra={'invoker': ctx.message.author.name})\r\n guess = None\r\n limDn = 0\r\n limUp = 100\r\n tries = 7\r\n secret = random.randint(1, 100)\r\n await ctx.send(\"\"\"Arr! I'm the Dread Pirate Roberts, and I have a secret!\r\nIt's a number from {} to {}. I'll give you {} tries.\r\nSend a number to guess it.\"\"\".format(limDn, limUp, tries))\r\n while guess != secret and tries > 0:\r\n await ctx.send(\"What's yer guess, matey?\")\r\n result = ''\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and re.match('[0-9]+', m.content))\r\n guess = int(guess.content)\r\n if guess == secret:\r\n break\r\n elif guess < limDn or guess > limUp:\r\n result += \"Out of range, ye swab!\\n\"\r\n elif guess < secret:\r\n result += \"Too low, ye scurvy dog!\\n\"\r\n limDn = guess\r\n elif guess > secret:\r\n result += \"Too high, landlubber!\\n\"\r\n limUp = guess\r\n tries -= 1\r\n result += \"Yer range is {} to {}; ye have {} tries left.\".format(limDn, limUp, tries)\r\n await ctx.send(result)\r\n if guess == secret:\r\n await ctx.send(\"Avast! Ye got it! Found my secret, ye did! With {} tries left!\".format(tries))\r\n else:\r\n await ctx.send(\"No more tries, matey! Better luck next time! The secret number was {}.\".format(secret))",
"def user_pick(self):\n player_taking = True\n while player_taking:\n play_take = int(input(\"How many dots would you like to remove?(1-4)\"))\n if not 1 <= play_take <= 4:\n print(\"You may only take between 1 and 4 balls\")\n else:\n player_taking = False\n return play_take",
"def get_puzzle_no():\r\n \r\n puzzle_no = int(input(\"Enter the number of the puzzle to print the trace of (1-25): \"))\r\n while puzzle_no < 1 or puzzle_no > 25:\r\n print(\"Choice is invalid! Try again\")\r\n puzzle_no = int(input(\"Enter the number of the puzzle to print solution of (1-25): \"))\r\n \r\n return puzzle_no",
"def get_user_input(arg_pair: EviPair):\n global HUMAN_CORRECT_PRED\n\n while True:\n try:\n choice = int(raw_input())\n\n if choice in [1,2]:\n\n if choice == arg_pair.label:\n HUMAN_CORRECT_PRED += 1\n\n break\n else:\n print(WRONG_INPUT)\n except ValueError:\n print(WRONG_INPUT)\n\n return choice",
"def test_pick():\r\n global user_pick\r\n while user_pick > pickno or user_pick <= 0 or type(user_pick):\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n #Keeps the number of balls picked by user to be between 0 and 4\r",
"def user_picks():\r\n print (\"Enter the second to last posted Fantasy 5 lotto numbers from 1 to 42:\")\r\n ui = []\r\n while len(ui) < 5:\r\n print (len(ui) + 1,)\r\n try:\r\n i = int(input(\"--> \" ))\r\n # check if i is unique and has a value from 1 to 42\r\n # and is an integer, otherwise don't append\r\n if (i not in ui) and (1 <= i <= 42): \r\n ui.append(i)\r\n except:\r\n print (\"Enter an integer number!\")\r\n return ui",
"def number_len(password_length):\r\n while True:\r\n numb_length = input('How much numbers you want in password? At least 1 : ')\r\n try:\r\n numb_length = int(numb_length)\r\n if 1 <= numb_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(numb_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(numb_length))\r\n return numb_length",
"def exercise4():\n rolls = easygui.integerbox('How many 7s:', 'Input', '', 0, 2 ** 31)\n total = count_sevens( rolls )\n percent = rolls * 100 / total\n easygui.msgbox(\"{} out of {} rolls ({:.2f}%) were 7.\".format(rolls, total, percent))",
"def take_user_input():\n window = turtle.Screen()\n window.bgcolor(\"black\")\n size = int(window.textinput(\n \"Maze Creation\", \"Size of the maze:\"))\n if size % 2 == 0:\n size += 1\n return size",
"def passwd_prompt():\n\n print(\"Passwords MUST contain AT LEAST: one lower-case letter,\" \n \"one number, one symbol, and be a MINIMUM of 8 characters in length,\"\n \"e.g. r!ght2oE\")\n\n while True:\n\n passy = getpass.getpass(prompt=\"Enter password for user: \")\n confirm_passy = getpass.getpass(prompt=\"To confirm, \" \\\n \"re-enter password: \")\n\n # check for the following conditions: \n # user input matches\n # length of input is at least 8 characters\n # input contains at least 1 number \n # input contains at least 1 letter \n # input contains at least 1 symbol \n \n if passy != confirm_passy \\\n or len(passy) <8 \\\n or not re.search('\\d', passy) \\\n or not re.search(r\"[a-z]\",passy) \\\n or not re.search(r\"[ !#$%&'()*+,-./[\\\\\\]^_`{|}~\"+r'\"]', passy): \n \n print(TRY_AGAIN)\n continue \n \n else:\n print(\"Password meets complexity requirement. Continuing...\") \n return passy",
"def setup_number_of_faces():\n \n while True:\n faces = int(input(\"Geben Sie die Seitenanzahl der Würfel an (2 - 100) oder tippe '0' zum A\\\nbbruch: \"))\n if 2 <= faces <= 100:\n break\n elif faces == 0:\n quit()\n else:\n print(\"ERROR: Du musst eine Zahl zwischen 2 und 100 eingeben!\")\n print()\n print()\n return faces",
"def get_dungeon_size():\n size = input(\"Choose the size of the dungeon... (4 - 24)\\n>\")\n size = int(size)\n while size < 4 or size > 24:\n print(\"Pick a number between four and 24.\")\n size = input(\"Choose the size of the dungeon... (4 - 24)\\n>\")\n size = int(size)\n return size",
"def enterGuess(self):\n validPattern = False\n while not validPattern:\n print # intentional blank line\n prompt = 'Enter a guess (colors are '\n prompt += self._palette[:self._numColorsInUse] + '): '\n patternString = raw_input(prompt)\n \n validPattern = True\n if len(patternString) != self._lengthOfPattern:\n print 'The pattern must have', self._lengthOfPattern, 'pegs'\n validPattern = False\n else:\n for i in range(self._lengthOfPattern):\n if patternString[i].upper() not in self._palette[:self._numColorsInUse]:\n validPattern = False\n if not validPattern:\n print 'The color options are', self._palette[:self._numColorsInUse]\n \n if validPattern:\n pattern = Pattern(self._lengthOfPattern)\n for i in range(self._lengthOfPattern):\n pattern.setPegColor(i, self._palette.index(patternString[i].upper()))\n\n return pattern",
"def how_many(number):\n if int(number) == 1:\n return \"guess\"\n return \"guesses\"",
"def get_num_hexagons():\n num_hexagons = float(input('Пожалуйста, введите количество шестиугольников, располагаемых в ряд: '))\n while not (4 <= num_hexagons <= 20):\n num_hexagons = float(input('Оно должно быть от 4 до 20. Пожалуйста, повторите попытку: '))\n return num_hexagons",
"def getSecretMessage(limit):\n\n\tsecret = None\n\twhile secret == None or len(secret) not in range(1, limit+1):\n\t\tsecret = raw_input(\"Enter the secret message (Max length %d): \" % limit)\n\t\tif len(secret) > limit:\n\t\t\tprint \"Invalid message: too long!\"\n\t\telif len(secret) < 1:\n\t\t\tprint \"Invalid message: empty input!\"\n\n\treturn secret",
"def process_player_input(self,guess):\r\n # Step 1 - Catch faulty input, this is not topic of week 2\r\n\r\n # Tell the player the secret number :-)\r\n if (guess == \"Cheat\"):\r\n return \"Secret number = %d\" % (self.secret_number)\r\n \r\n # Step 2 - Verify player's input.\r\n user_input = self.verify_input(guess, self.num_range)\r\n if (type(user_input) != type(0)):\r\n # Verify_input() detected faulty input\r\n # Let's leave here with the error message\r\n return user_input\r\n\r\n # Decrease the number of still available tries\r\n if (self.remaining_guesses>0):\r\n self.remaining_guesses -= 1\r\n print \"Remaining number of tries = \", self.remaining_guesses\r\n \r\n # Step 3 - Give the player a hint for next guess\r\n if ((user_input > self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Lower!\"\r\n elif ((user_input < self.secret_number) and (self.remaining_guesses > 0)):\r\n # Give a hint just if the player has another try\r\n result_message = \"Higher!\"\r\n elif (user_input == self.secret_number):\r\n result_message = self.correctguess_message\r\n else:\r\n # As the guess was wrong and there is no further try anymore,\r\n # tell the player that he/she lost\r\n result_message = \"You tried too often than necessary, You lost!\"\r\n return result_message",
"def pick_number(low, high, limit):\n print(\"Think of a number from \" + str(low) + \" to \" +\n str(high) +\" and I will try to guess it and I will get a total of \" + str(limit) + \" tries. Press Enter when you are ready.\")\n input()",
"def get_dimension():\n\n dimension = 0\n while (dimension != '2') and (dimension != '3'):\n dimension = input(\"Which dimension do you want (2/3)? \")\n if (dimension != '2') and (dimension != '3'):\n print(\"This program doesn't support that dimension, please input again\")\n dimension = int(dimension)\n return dimension"
] | [
"0.6076266",
"0.60246533",
"0.6022768",
"0.5874914",
"0.57561094",
"0.5753638",
"0.57490975",
"0.5721011",
"0.5687204",
"0.56637275",
"0.5620408",
"0.56060576",
"0.559462",
"0.5586774",
"0.5576528",
"0.5574137",
"0.5513518",
"0.5511759",
"0.5488277",
"0.54585576",
"0.5408952",
"0.54017985",
"0.5360931",
"0.5356373",
"0.53487307",
"0.5335186",
"0.5326323",
"0.532297",
"0.53083026",
"0.52886444"
] | 0.661876 | 0 |
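A hypothetical console session for queryLengthOfPattern, which simply delegates to the _readInt helper from the previous row and caches the answer:

# >>> ui.queryLengthOfPattern()    # 'ui' is an instance of the class above
# How many pegs are in the secret (from 1 to 10)? 4
# 4                                # also stored as ui._lengthOfPattern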
Get a guess from the user and return it as a Pattern instance. | def enterGuess(self):
    validPattern = False
    while not validPattern:
        print()  # intentional blank line
        prompt = 'Enter a guess (colors are '
        prompt += self._palette[:self._numColorsInUse] + '): '
        patternString = input(prompt)

        validPattern = True
        if len(patternString) != self._lengthOfPattern:
            print('The pattern must have', self._lengthOfPattern, 'pegs')
            validPattern = False
        else:
            for i in range(self._lengthOfPattern):
                if patternString[i].upper() not in self._palette[:self._numColorsInUse]:
                    validPattern = False
            if not validPattern:
                print('The color options are', self._palette[:self._numColorsInUse])

    if validPattern:
        pattern = Pattern(self._lengthOfPattern)
        for i in range(self._lengthOfPattern):
            pattern.setPegColor(i, self._palette.index(patternString[i].upper()))

    return pattern | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_guess(self):\n return self._guess",
"def get_guess(self):\n new_guess = \"\"\n try:\n new_guess = input(\"Enter a letter: \").lower()\n if len(new_guess) > 1:\n new_guess = \"INVALID\"\n raise ValueError(\"The guess you entered was too long. Make sure that it is only one character\")\n elif len(new_guess) < 1:\n new_guess = \"INVALID\"\n raise ValueError(\"The guess you entered was too short. Make sure that it is only one character\")\n elif ord(new_guess) < 97 or ord(new_guess) > 122:\n new_guess = \"INVALID\"\n raise ValueError(\"Your input was deemed invalid! Please make sure input is a character a-z\")\n elif new_guess in self.guesses:\n print(f\"You already guessed the letter {new_guess}, try again\")\n new_guess = \"INVALID\"\n except ValueError as err:\n print(err)\n return new_guess",
"def pattern_factory(self):\n\t\treturn self.args[1]",
"def guess(self):\n\t\t\n\t\tpeg_guess_color_list = []\n\t\tguess_input = self.view.input_guess()\n\n\t\t# Convert guess_input into a list- each color being a string\n\t\tguess_color_list = re.split(\",\", guess_input)\n\t\t\n\n\t\tfor each_color in guess_color_list:\n\n\t\t\t#associate each string with a peg object\n\t\t\tpeg_guess = ColorPeg(each_color)\n\t\t\t\n\t\t\t# Append the peg_guess color list to make a list of peg guess objects\n\t\t\tpeg_guess_color_list.append(peg_guess)\n\n\t\t\t# Plug our peg objects into our guess object\n\t\t\tuser_guess = Guess(peg_guess_color_list)\n\n\t\t\t# Store guess object in our MasterModel\n\t\t\tself.model.guesses[self.model.status] = user_guess\n\n\t\t\t# Make a variable that\n\n\n\t\t# ### TESTS ###\n\t\t# print (\"This is each color: \", each_color)\n\t\t# print (\"print guess input again: \", guess_input)\n\t\t# print(\"prints each peg color for guess: \", peg_guess)\n\t\t# print(\"Prints the list of color guesses: \", peg_guess_color_list)\n\t\t# for peg_guess in peg_guess_color_list:\n\t\t# \tprint(\"Prints the list of guess pegs: \", peg_guess.peg_color)\n\n\t\t# print(\"Prints out the first list of guesses. Key = Guess 1\", self.model.guesses[\"Guess 1\"])",
"def get_input(self, guess):\r\n print\r\n print \"The player guessed = \", guess\r\n result = self.process_player_input(guess)\r\n print result\r\n if ((self.remaining_guesses == 0) or ( result == self.correctguess_message)):\r\n # Start a new game, with same range\r\n self.init(self.num_range)\r\n return result",
"def get_guess_from_user(self):\n self.guess_number = input(f\"please guess a number between 1 to {self.difficulty}: \\n\")\n while True:\n if not self.guess_number.isnumeric() or \\\n not int(self.guess_number) <= self.difficulty or \\\n not int(self.guess_number) >= 0:\n self.guess_number = input(f\"you input is invalid!! please guess a number between 1 to {self.difficulty}: \\n\")\n else:\n self.guess_number = int(self.guess_number)\n break\n return self.guess_number",
"def get_pattern(self):\n if self.pattern is None:\n pattern_str = self.blueprint.pattern()\n pattern_file = self.remgr.lookup_pattern_file(self.blueprint, self.provider)\n self.pattern = pattern.Pattern(pattern_str, pattern_file)\n self.pattern.set_provider(self)\n return self.pattern",
"def get_choice(attempt):\n try:\n user_text=''\n\n if attempt ==1:\n user_text ='Guess a number between 0 and 99:'\n \n choice = int(input(user_text))\n except ValueError:\n return get_choice()\n return choice",
"def get_guess(self):\n guess = self.player.higher_or_lower",
"def get_guess():\n print('Choose a letter:')\n return input()",
"def regex_pattern(self):\n regex_to_match = input(\"Enter the regex pattern you'd like to use> \")\n return regex_to_match",
"def get_pattern(guess, true_word):\n return sum(\n value * (3**i)\n for i, value in enumerate(pattern_trit_generator(guess, true_word))\n )",
"def get_atom_guess(self):\r\n return self._player.get_atom_guesses()",
"def getPattern(self):\n return self.pattern",
"def get_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words):\n\n\tprint \"\\n The word to guess is: \", mask\t\n\tprint \"\\n # of attempts: \", attempts\n\tprint \"\\n Insert a letter or a number \\n\"\n\tthe_guess = raw_input()\n\tthe_guess = the_guess.lower()\n\t# Check if the input is a valid character\n\tvalidity = check_validity(the_guess, valid_characters, user_guesses)\n\tif (validity is True):\n\t\t# CHeck if the user has guessed the letter\n\t\tif (check_if_guessed(the_guess, word_to_guess) >= 0):\n\t\t\tprint \"\\n Great! your choosed the correct letter!\"\n\t\t\tuser_guesses += the_guess\n\t\t\tmask = calculate_mask(user_guesses, word_to_guess)\n\t\t\tyou_won = check_if_won(user_guesses, word_to_guess, secret_words)\n\t\t\tif you_won is True:\n\t\t\t\t# If the user has won it stop the game\n\t\t\t\treturn\n\t\telse:\n\t\t\tattempts = attempts + 1\n\t\t\tprint \"\\n Sorry! the letter is not present in the word! you have now %d guess left\" % (6 - attempts)\n\t\t\tyou_lost = check_if_lost(attempts, secret_words)\n\t\t\tif you_lost is True:\n\t\t\t\t# If he user has lost it stop the game\n\t\t\t\treturn\n\telse:\n\t\tprint \"\\n The input is not valid! Insert a valid input\"\n\tget_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words)\n\treturn",
"def guess_word(self):\r\n guess = input(\"# Guess the Word :\")\r\n if not guess:\r\n print(\"Please enter a valid word.\")\r\n else:\r\n if game_instance.check_word(guess):\r\n print(\"Correct! You did it Champ!\")\r\n game_instance.calculate_score(self.frequency)\r\n self.instances.append(game_instance)\r\n obj.create_new_game()\r\n else:\r\n print(\"Wrong Guess. Try Again!\")",
"def user_guess():\n return list(input(\"What is your guess?\"))",
"def get_user_input(self):\r\n try:\r\n user_input = input('Guess a letter: ')\r\n print('\\n')\r\n if user_input.lower() in self.already_guessed:\r\n raise ValueError(YELLOW + 'You already guessed '\r\n f'{user_input.lower()}.\\n' + END)\r\n if len(user_input) == 0:\r\n raise ValueError(YELLOW + 'You didn\\'t enter a letter. '\r\n 'Please enter a letter between A-Z\\n' + END)\r\n if not user_input.isalpha():\r\n raise ValueError(YELLOW + 'You entered a number. '\r\n 'Please enter a letter between A-Z.\\n' + END)\r\n if len(user_input) > 1:\r\n raise ValueError(YELLOW + 'Please enter one letter.\\n' + END)\r\n except ValueError as error:\r\n print(error)\r\n self.get_user_input()\r\n else:\r\n if len(self.already_guessed) > 0: # prints previous guesses\r\n self.print_previous_guesses()\r\n if user_input.lower() in [letter.original.lower() for letter in\r\n self.active_phrase if letter != ' ']:\r\n for letter in self.active_phrase:\r\n if letter != ' ':\r\n letter.compare_guess(user_input) # checks guess\r\n self.active_phrase.print_phrase()\r\n else:\r\n self.lives -= 1\r\n print(f'You have {self.lives} out of 5 lives remaining!\\n')\r\n if user_input.lower() not in self.already_guessed:\r\n self.already_guessed.append(user_input.lower())\r\n self.active_phrase.print_phrase()",
"def pattern(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pattern\")",
"def pattern(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"pattern\")",
"def get_pattern(self, name):\n return self._pattern_reg[name]",
"def get_guess(already_guessed):\n\n while True:\n print('Guess a letter.')\n guess = (input()).lower()\n if len(guess) != 1:\n print('Please enter a single letter.')\n elif guess == ' ':\n print('Space is not a valid entry. Please enter a single letter.')\n elif guess in already_guessed:\n print('\"Already guessed the letter. Choose again.')\n elif guess not in 'abcdefghijklmnopqrstuvwxyz':\n print('Please enter a LETTER.')\n else:\n return guess",
"def guess(self, message, db_session):\n user = self.ts.get_user(message)\n if db_session.query(db.MiscValue).filter(db.MiscValue.mv_key == 'guessing-enabled').one().mv_value == 'True':\n msg_list = self.ts.get_human_readable_message(message).split(' ')\n if len(msg_list) > 1:\n guess = msg_list[1]\n if guess.isdigit() and int(guess) >= 0:\n self._set_current_guess(user, guess, db_session)\n self._add_to_whisper_queue(user, \"{} your guess has been recorded.\".format(user))\n else:\n self._add_to_whisper_queue(user, \"Sorry {}, that's not a non-negative integer.\".format(user))\n else:\n self._add_to_whisper_queue(user,\n \"Sorry {}, !guess must be followed by a non-negative integer.\".format(user))\n else:\n self._add_to_whisper_queue(user, \"Sorry {}, guessing is disabled.\".format(user))",
"def guess():\n word = request.args[\"word\"]\n board = session[\"board\"]\n\n # create response by the response of the function if word is valid\n response = boggle_game.check_valid_word(board, word)\n\n return jsonify({'result': response})",
"def eval_guess(self, Guess):\n\n\t\t# pulls comparison from win check and assigns peg responses \n\n\t\t# returns a list to be in hint_response\n\n\t\t# displays as part of big display in view.\n\n\t\t\"\"\"Borrow the logic from win_check to implement eval_guess. Use variables right and wrong to \n\t\tevaluate. Right = Black peg. Wrong = no peg. \n\n\t\tWhite will be generated from a third loop to compare the entire list\"\"\"\n\n\n\t\tpass",
"def pattern(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"pattern\")",
"def _parse_pattern(cls, pattern, default_pattern: str = \"*\") -> Pattern:\n pattern = pattern or default_pattern\n if pattern is None:\n return None\n\n return Pattern(pattern)",
"def test_guessing(self):\n self.classifier.guess(self.message)",
"def guess(cls, docstring):",
"async def _guess(self, ctx):\n reply = '\\n'\n for i, entry in enumerate(db.get_leaderboard(\n ctx.message.server.id,\n 'guess-leaderboard')):\n for key, value in entry.items():\n if key == \"discord_id\":\n name = self.get_name(ctx, value)\n elif key == 'date':\n date = value\n else:\n score = value\n reply += '{}. {} - {} ({})\\n'.format(\n i+1,\n score,\n name,\n datetime.datetime.fromtimestamp(\n int(date)).strftime('%d-%m-%Y')\n )\n await self.bot.say(reply)"
] | [
"0.55911654",
"0.55187875",
"0.5507677",
"0.5500603",
"0.5459489",
"0.54325235",
"0.53727347",
"0.52810353",
"0.52759",
"0.51961994",
"0.5180741",
"0.5148151",
"0.5114034",
"0.50629675",
"0.5012794",
"0.5007291",
"0.49607527",
"0.4953114",
"0.4934326",
"0.4934326",
"0.4837743",
"0.4833182",
"0.4827561",
"0.47882834",
"0.47829786",
"0.4779183",
"0.47734258",
"0.4764575",
"0.4763804",
"0.4760488"
] | 0.7146551 | 0 |
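The Pattern class consumed by enterGuess is not shown in this row; a minimal stand-in consistent with the two calls made above (constructor takes a length, setPegColor records a palette index per position) might look like this sketch:

class Pattern:
    # Minimal sketch; the real class presumably adds comparison/display logic.
    def __init__(self, length):
        self._pegs = [None] * length  # one palette index per peg position

    def setPegColor(self, index, colorIndex):
        self._pegs[index] = colorIndex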
Restarts the timer and closes any existing progress bar. | def restart(self):
self.done()
self.counter = 0
self.start_time = time.time() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def restart_timer(self):\n self.log.info(\"{} timer restarted ({} seconds)\".format(self.name, self.interval))\n self.count = self.interval / self.sleep_chunk\n if not self.defer and self.interval > 0:\n self._callback()\n if self.start_event.is_set():\n self.reset_event.set()\n else:\n self.start_event.set()",
"def restart(self):\n self._start_time = None\n self.start()",
"def close_progress(self):\r\n\r\n pass",
"def restart(self):\n self.stop()\n self.start()",
"def restart(self):\n self.stop()\n self.start()",
"def stop_timer(self):\r\n self.countdownTimer.stop()",
"def restart(self):\n self.stop()\n self.start(init=False)",
"def restart(self):\n\n self.stop()\n self.start()",
"def close(self):\n if self._timer is not None:\n self._timer.cancel()\n self._timer = None",
"def restart():\n stop()\n start()",
"def timer(self):\n self.time_remaining -= 1\n if self.time_remaining > 0:\n Timer(1, self.timer).start()",
"def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n self.reconnect()",
"def restart_motion_timer(self) -> None:\n if \"motion_timer\" in self.handles:\n self.adbase.cancel_timer(self.handles[\"motion_timer\"])\n self.handles.pop(\"motion_timer\")\n self.handles[\"motion_timer\"] = self.adbase.run_in(\n self.disable_area_motion, self.delay_off\n )",
"def restart(self):\n\t\treturn self.reset().start()",
"def reset_timer(self):\r\n self.time_minutes = 0\r\n self.time_seconds = 0",
"def timer_canceled(self, timer):\n try:\n try:\n timer.impltimer.stop()\n del timer.impltimer\n except (AttributeError, TypeError):\n pass\n finally:\n super(Hub, self).timer_canceled(timer)",
"def restart(self):\r\n pass",
"def restart(self):",
"def close(self) -> None:\n\n if not self.simple_tui:\n self.rich_progress_bar.stop()\n\n logging.shutdown()",
"def stop(self):\n self.setWindowTitle(self.name + ': stopped')\n self._timer.stop()",
"def reset_timer():\r\n window.after_cancel(timer)\r\n canvas.itemconfig(timer_text, text=f\"00:00\")\r\n pomodoro_title.config(text=\"Timer\", fg=GREEN)\r\n check_marks.config(text=\"\")",
"def reset_stop_timer(self) -> None: \r\n self.stop_timer = 0",
"def untie(self):\n self.timer_label = None",
"def Reset():\n #if timer.is_running():\n timer.stop()\n global n,message\n global total_stop\n global success_stop\n n = 0\n message = \"0:00.0\"\n total_stop=0\n success_stop=0",
"def restart(self):\n pass",
"def restart(self) -> None:",
"def reset():\n global counter, total_attempts, successful_stops\n timer.stop()\n counter = 0\n total_attempts = 0\n successful_stops = 0",
"def restart(self, delay=None):\n if self._timer:\n self._timer.cancel()\n if not delay:\n delay = self.delay\n self._timer = Timer(delay, self.callback)\n self._timer.daemon = True\n self._timer.start()",
"def stop_timer(self):\n self.end_time = datetime.now()",
"def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()"
] | [
"0.66411126",
"0.6284358",
"0.62715745",
"0.62477195",
"0.62477195",
"0.6240947",
"0.62338436",
"0.62116516",
"0.6179327",
"0.6083561",
"0.6083475",
"0.60694265",
"0.59795976",
"0.5975325",
"0.59243655",
"0.58290434",
"0.5823037",
"0.57699066",
"0.5765696",
"0.5754959",
"0.5740772",
"0.57318693",
"0.5721797",
"0.5711136",
"0.571063",
"0.5706461",
"0.56756395",
"0.56723636",
"0.5670013",
"0.5654577"
] | 0.6867762 | 0 |
Advances the progress bar. If visible, shows progress, otherwise updates in the background. If the time threshold has passed and the progress bar should appear, this method creates it. | def next(self):
if self.skip:
return
self.counter += 1
if self.pbar is None and (time.time() - self.start_time) > self.threshold:
self.pbar = tqdm(total=self.n, desc=self.title, initial=self.counter)
elif self.pbar is not None:
self.pbar.update(1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_progress_bar(self):\r\n self.progress[\"value\"] = self.progress_step",
"def notify_progress(self, ratio):\n self._progress_bar += ratio\n while self._progress_bar > self._offset_bar:\n self._offset_bar += 0.01\n self._progress_window.progress(100 * self._progress_bar)\n # print(100 * self._progress_bar)",
"def _show_time_updates(p_bar):\n while p_bar.total > p_bar.n:\n time.sleep(1)\n if p_bar.total > p_bar.n:\n p_bar.refresh()",
"def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)",
"def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)",
"def incProgress(self, val):\n\n if val is not None:\n self._progressBar.show()\n self._progressBar.setTextVisible(True)\n self.progress = self.progress + val\n try:\n self._progressBar.setValue(self.progress)\n qApp.processEvents()\n except:\n pass\n else:\n self._progressBar.setTextVisible(False)\n self._progressBar.hide()\n self._progressBar.reset()\n\n if self.isHidden is True:\n self.isHidden = False\n self.show_()",
"def increment(self, length):\r\n self.progress_bar.update(length)",
"def ad_step_to_progress_bar(self, n):\r\n self.progress_step += n\r\n self.progress[\"value\"] = self.progress_step\r\n self.progress.update_idletasks()",
"def increase_progress(self, value):\r\n\r\n pass",
"def progress_status(self):\n from tqdm import tqdm\n pbar_a = tqdm(total=len(self.jobs), position=0)\n pbar_a.set_description('Submitted jobs ...')\n pbar_b = tqdm(total=self.n_submit_script, position=1)\n pbar_b.set_description('Running jobs ...')\n pbar_c = tqdm(total=self.n_submit_script, position=2)\n pbar_c.set_description('Completed jobs ...')\n pbar_d = tqdm(total=self.n_submit_script, position=3)\n pbar_d.set_description('Failed? jobs ...')\n while self.n_completed < self.n_submit_script:\n pbar_a.n = self.n_submitted\n pbar_b.n = self.n_running\n pbar_c.n = self.n_completed\n pbar_d.n = self.n_failed + self.n_unknown\n pbar_a.refresh()\n pbar_b.refresh()\n pbar_c.refresh()\n pbar_d.refresh()\n sleep(5)\n self.update_status()",
"def update_progressbar(self, count, value):\n self.status(\"Progress %s/%s\" % (value, count))",
"def set_progress(self, progress: float):",
"def updateAmount(self, newAmount = 0):\n if newAmount and self.starting_amount is None:\n self.starting_amount = newAmount\n self.starting_time = time.time()\n if newAmount < self.min: newAmount = self.min\n if newAmount > self.max: newAmount = self.max\n self.prev_amount = self.amount\n self.amount = newAmount\n\n # Figure out the new percent done, round to an integer\n diffFromMin = float(self.amount - self.min)\n percentDone = (diffFromMin / float(self.span)) * 100.0\n percentDone = int(round(percentDone))\n\n # Figure out how many hash bars the percentage should be\n allFull = self.width - 2\n numHashes = (percentDone / 100.0) * allFull\n numHashes = int(round(numHashes))\n\n # Build a progress bar with an arrow of equal signs; special cases for\n # empty and full\n\n if numHashes == 0:\n self.progBar = \"[>%s]\" % (' '*(allFull-1))\n elif numHashes == allFull:\n self.progBar = \"[%s]\" % ('='*allFull)\n else:\n self.progBar = \"[%s>%s]\" % ('='*(numHashes-1),\n ' '*(allFull-numHashes))\n \n if self.show_percentage:\n # figure out where to put the percentage, roughly centered\n percentPlace = (len(self.progBar) / 2) - len(str(percentDone))\n percentString = str(percentDone) + \"%\"\n else:\n percentPlace = (len(self.progBar) / 2) - len(str(percentDone))\n percentString = '%s/%s' % (self.amount, self.span)\n # slice the percentage into the bar\n self.progBar = ''.join([self.progBar[0:percentPlace], percentString,\n self.progBar[percentPlace+len(percentString):]\n ])\n if self.starting_amount is not None:\n amount_diff = self.amount - self.starting_amount\n if amount_diff:\n self.prev_time = self.current_time\n self.current_time = time.time()\n elapsed = self.current_time - self.starting_time\n eta = elapsed * (self.max - self.amount)/float(amount_diff)\n self.progBar += ' ETA:'+time_to_str(eta)",
"def progress_update(self):\n self._window.scan_progress.setValue(self.scan_progress)",
"def show_progressbar(self):\n\n self.progressframe = tk.Toplevel(self, background='white')\n self.progressframe.lift()\n self.progressframe.focus_force()\n self.progressframe.grab_set()\n self.progressframe.resizable(False, False)\n self.progressframe.minsize(width=200, height=50)\n progressbar = ttk.Progressbar(self.progressframe, mode='indeterminate', length=200)\n progressbar.pack(pady=(10, 0), padx=5)\n progressbar.start(10)\n progresslabel = tk.Label(self.progressframe, text='Generating BOM Comparison', background='white')\n progresslabel.pack(pady=(0, 10))",
"def advance():\n pg = ppv.progress + random.random() / 20\n if pg < 1:\n ppv.progress = pg\n ui.delay(advance, random.random() / 2)\n else:\n ppv.progress = 1",
"def tick(self):\n self.current_count += 1\n self.progress(self.current_count)",
"def on_timeout(self, data):\n new_value = self.progressbar.get_fraction() + 0.01\n\n if new_value > 1:\n return False\n\n self.progressbar.set_fraction(new_value)\n return True",
"def progress(self):\n if self.running:\n pass\n else:\n self._engine.progress()",
"def progress(self, value):\n self.step = float(value)\n self._draw()",
"def update_progress(progress, time):\n barLength = 30 # Modify this to change the length of the progress bar\n status = \"\"\n if isinstance(progress, int):\n progress = float(progress)\n if not isinstance(progress, float):\n progress = 0\n status = \"error: progress var must be float\\r\\n\"\n if progress < 0:\n progress = 0\n status = \"Halt...\\r\\n\"\n if progress >= 1:\n progress = 1\n status = \"Done...\\r\\n\"\n block = int(round(barLength * progress))\n text = \"\\rPercent: [{0}] {1:.2f}% --- {3:.2f} s. remain. {2}\".format(\n \"=\" * (block - 1) + \">\" + \" \" * (barLength - (block - 1) - 1), progress * 100, status, time)\n sys.stdout.write(text)\n sys.stdout.flush()",
"def time_remaining(self):\n elapsed_time = time.time() - self.start_time\n self.progressbar['value'] = progressbar.current\n time_remaining = round((1 - progressbar.current) * elapsed_time)\n # Disabled for Demo due to confusion\n # if time_remaining < 60:\n # self.progress_label.config(text=f'Estimated Time Remaining: {time_remaining} seconds')\n # elif 3600 > time_remaining > 60:\n # time_remaining = round(time_remaining / 60)\n # self.progress_label.config(text=f'Estimated TIme Remaining: {time_remaining} minutes')\n # elif time_remaining > 3600:\n # time_remaining = dt.timedelta(seconds=time_remaining)\n # self.progress_label.config(text=f'Estimated Time Remaining: {time_remaining}')",
"async def display_progress_bar(self, is_displayed):\n\t\tprint(\"DISPLAY PROGRESS BAR: \" + str(is_displayed))\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"display_progress_bar\": is_displayed\n\t\t\t}\n\t\t)",
"def print_progress_bar(self, iter_num, start_time):\n iteration = iter_num + 1\n prefix = \"Progress: \"\n length = 50\n fill = '█'\n percent = (\"{0:.\" + str(1) + \"f}\").format(100 *\n (iteration / float(self.num_games)))\n exact_progress = \"{}/{}\".format(iteration, self.num_games)\n filled_length = int(length * iteration // self.num_games)\n total_time = int(time()-start_time)\n time_remaining = (time() - start_time)/(float(iter_num)+0.1)\n time_remaining = str(int(time_remaining*(self.num_games-iter_num)))\n bars = fill * filled_length + '-' * (length - filled_length)\n\n print('\\r%s |%s| (%s) %s%% | ETA: %ss (%ss)\\t' %\n (prefix, bars, exact_progress,\n percent, time_remaining,\n total_time), end='\\r')\n\n # Print New Line on Complete\n if iteration >= self.num_games:\n print(\"\\r\\n\\r\\n\")",
"def updateProgress(self, msg):\n self.count += 1\n \n if self.count >= 20:\n self.Destroy()\n \n self.progress.SetValue(self.count)",
"def status_notify(self, fraction, progress_text, status_text=''):\n gtk.gdk.threads_enter()\n try: # coupling...\n if self._pdialog.top_widget:\n self._pdialog.progressbar.set_fraction(fraction)\n self._pdialog.progressbar.set_text(progress_text)\n self._pdialog.statustext.set_markup('<i>%s</i>' % status_text)\n finally:\n gtk.gdk.threads_leave()",
"def add_progressbar(self):\n\n self._progressBar = QProgressBar(self._splash)\n self._progressBar.setGeometry(self._splash.width() / 10, 8 * self._splash.height() / 10,\n 8 * self._splash.width() / 10, self._splash.height() / 10)\n self._progressBar.hide()",
"def _setProgress(self):\n\n self.progress = (self.iteration, self.iterationCount)",
"def trackProgress(target, message=\"\", updateRate=1):\n\n global _activeBar\n if _activeBar is not None:\n logging.getLogger(__name__).error(\"Cannot construct a new progress bar, \"\n \"another one is already active.\")\n raise RuntimeError(\"A progress bar is already active.\")\n\n try:\n _activeBar = makeProgressbar(target, message, updateRate)\n yield _activeBar\n _activeBar.finalize() # success => clean up\n\n except:\n # failure => leave bar visible and advance a line\n sys.stderr.write(\"\\n\")\n raise\n\n finally:\n # in any case the bar is now done\n _activeBar = None",
"def draw_progress_bar(self, col, row, size, ratio, color=GREEN):\n npixels = size * ratio\n for n in range(int(npixels)):\n self.putpixel(col + n, row, color)\n # flash an addiotional pixel as fraction indicator\n if npixels - int(npixels) > .5 and self.nframes & 1 == 0:\n self.putpixel(col + int(npixels), row, color)"
] | [
"0.6722962",
"0.6673535",
"0.6486935",
"0.6408159",
"0.6408159",
"0.6382689",
"0.63463795",
"0.6336971",
"0.6323932",
"0.63100183",
"0.6251043",
"0.6093328",
"0.60822016",
"0.6081106",
"0.6039662",
"0.59908193",
"0.5933977",
"0.59072167",
"0.5903473",
"0.5895819",
"0.5878664",
"0.58754796",
"0.5856501",
"0.58544827",
"0.58541465",
"0.5849341",
"0.57626057",
"0.57622206",
"0.574999",
"0.57054585"
] | 0.6813362 | 0 |
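The negatives closing out the record above are all variants of console or GUI progress reporting. For reference, here is a minimal self-contained console progress bar in the same spirit; this is an illustrative sketch and is not drawn from any record in this dataset:

```python
import sys
import time

def print_progress(iteration, total, length=30):
    # Render a fixed-width textual progress bar, redrawing in place with '\r'.
    fraction = iteration / total
    filled = int(length * fraction)
    bar = "=" * filled + "-" * (length - filled)
    sys.stderr.write("\r[%s] %5.1f%%" % (bar, fraction * 100))
    sys.stderr.flush()
    if iteration >= total:
        sys.stderr.write("\n")

if __name__ == "__main__":
    for i in range(1, 101):
        print_progress(i, 100)
        time.sleep(0.01)
```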
The worker function, invoked in a thread. 'nums' is a list of numbers to factor. The results are placed in outdict. | def worker(nums, outdict):
print(threading.current_thread().name)
print ("pid:", os.getpid())
for n in nums:
outdict[n] = factorize_naive(n) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def worker(nums, out_q):\n outdict = {}\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n print (\"data size:\", nums)\n for n in nums:\n outdict[n] = factorize_naive(n)\n out_q.put(outdict)",
"def worker(nums, outdict):\n for n in nums:\n outdict[n] = primes2(n)",
"def mock_workers(task, num_workers):\n results = [\n [{\n \"name\": \"tweet\",\n \"value\": \"%d. Trump Trump everywhere not a Hillary to see.\" % x\n }] for x in range(num_workers)]\n return results",
"def compute(args, fun, max_workers=6):\n print(\"\\nProcessing symbols in parallel\")\n ex = futures.ThreadPoolExecutor(max_workers=max_workers)\n ex.map(fun, args)",
"def compute_metrics(self, results: list) -> dict:",
"def spawn_threads():\n t0 = threading.Thread(target=print_numbers, args=[10, 0.9, \"\"]) \n t1 = threading.Thread(target=print_numbers, args=[7, 1, \" \"])\n t0.start()\n t1.start()",
"def multiprocess(inputs: list, worker_class: Any, num_threads: int = 40):\n\n input_queue = Queue() # type: ignore\n output_queue = Queue() # type: ignore\n\n for input_elm in inputs:\n input_queue.put(input_elm)\n\n threads = [worker_class(input_queue, output_queue)\n for _ in range(num_threads)]\n \n for thread in threads:\n thread.start()\n \n for thread in threads:\n thread.join()\n\n return get_all_nowait(output_queue)",
"def worker(file_paths, out_queue):\n\t\toutdict = {}\n\t\tfor path in file_paths:\n\t\t\toutdict[n] = run_muscle(path)\n\t\tout_queue.put(outdict)",
"def sweep_threading(self,sweep_n,start,end,points,filename='./test.txt'):\n ###############################\n ##multithread preparation\n ##############################\n threads = 8\n points = points//threads*threads # points per thread\n self.result = [[0.0 for i in range(self.n+1)]for j in range(points)]#this is the matrix which store the result, it will be saved to file later.\n job = self.allocate_job(start,end,points,threads)\n\n \n ################################\n ##This are codes for progress bar\n ###############################\n prog = ProgressBar(0, points, 50, mode='fixed', char='#')\n ##the linear algebra start here\n a = np.zeros(self.N)\n a[self.N-1] = 1 #1 because rho_11+rho_22 ... =1\n a = np.matrix(a)\n a = a.T\n\n thread_list = []\n for x in range(threads):\n thread_list.append(Sweep_Thread(self.result,job[x],prog,self.system,self.nu2,a,self.add_freq,self.index,sweep_n,self.n))\n\n tStart = time.time() \n for t in thread_list:\n t.start()\n\n for t in thread_list:\n t.join()\n tStop = time.time()\n print\"spend\",(tStop - tStart),\"second\"\n \n self.sweep_save_file(filename,points)",
"def worker(num_loops, cnt):\t\n\n\tglobal mutex\n\n\tfor i in range(num_loops):\n\t\ttotal = 0\n\t\tinside =0\n\n\t\tfor j in range(1000):\n\t\t\tx = random.random()\n\t\t\ty = random.random()\n\n\t\t\tif (x*x + y*y) <= 1:\n\t\t\t\t\tinside += 1\n\n\t\t\ttotal += 1\n\n\t\tmutex.acquire()\n\t\tcnt.add(total, inside)\n\t\tmutex.release()",
"def parallel(\n fn,\n workers=10,\n return_results=True,\n identifiers=None,\n args=None,\n kwargs=None,\n):\n # Check user input\n if args is not None and kwargs is not None:\n err = 'Amount of args must match those of kwargs'\n assert len(args) == len(kwargs), err\n\n if (args is not None or kwargs is not None) and identifiers is not None:\n err = 'Amount of identifier must match those of kw/args'\n n_args = len(args) if args is not None else len(kwargs)\n assert n_args == len(identifiers), err\n\n # Preprocessing for arguments lists\n identifiers = [] if identifiers is None else identifiers\n args = [] if args is None else args\n kwargs = [] if kwargs is None else kwargs\n\n if len(args) == 0 and len(kwargs) == 0:\n args = [None]\n kwargs = [None]\n else:\n if len(args) == 0:\n args = [[] for _ in range(len(kwargs))]\n if len(kwargs) == 0:\n kwargs = [dict() for _ in range(len(args))]\n\n # Initialize all the futures\n executor = futures.ThreadPoolExecutor(max_workers=workers)\n _futures = [\n executor.submit(fn, *args[i], **kwargs[i])\n for i in range(len(args))\n ]\n\n # Return only futures when requested\n if not return_results:\n return _futures\n\n # Block until we received all results\n if len(identifiers) > 0:\n results = {}\n else:\n results = []\n\n for i, future in enumerate(_futures):\n result = future.result()\n\n if len(identifiers) > 0:\n results[identifiers[i]] = result\n else:\n results.append(result)\n\n return results",
"def sweep_multiprocessing(self,sweep_n,start,end,points,filename='./test.txt'):\n ###############################\n ##multiprocessing preparation\n ##############################\n core = 10\n points = points//core*core # points per thread\n self.result = [[0.0 for i in range(self.n+1)]for j in range(points)]#this is the matrix which store the result, it will be saved to file later.\n job = self.allocate_job(start,end,points,core)\n\n \n ################################\n ##This are codes for progress bar\n ###############################\n prog = ProgressBar(0, points, 50, mode='fixed', char='#')\n ##the linear algebra start here\n a = np.zeros(self.N)\n a[self.N-1] = 1 #1 because rho_11+rho_22 ... =1\n a = np.matrix(a)\n a = a.T\n\n done_queue = multiprocessing.Queue()\n process_list = []\n for x in range(core):\n process_list.append(multiprocessing.Process(target = sweep_mp,args = (job[x],self.system,self.nu2,a,self.add_freq,self.index,sweep_n,self.n,done_queue)))\n\n tStart = time.time()\n print 'start'\n for p in process_list:\n p.start()\n\n stop_num = 0\n while stop_num != core:\n a = done_queue.get()\n if a == 'STOP':\n stop_num += 1\n else:\n self.result[a[0]] = a[1]\n prog.increment_amount()\n print prog, '\\r',\n sys.stdout.flush()\n\n print '\\n'\n for p in process_list:\n p.join()\n print \"%s.exitcode = %s\" %(p.name, p.exitcode)\n\n tStop = time.time()\n print\"spend\",(tStop - tStart),\"second\"\n \n self.sweep_save_file(filename,points)",
"def evaluate(self, tick, task, inputs, nosend_ports=None, fail_on_unexpected_nosend=False):\n\n logger.debug(\"Transfers for job %s\" % tick)\n\n ports = []\n transfers = []\n transfer_results = {}\n for port, (valueid, worker) in inputs.iteritems():\n \n \n d = self.fetch_from(worker, valueid)\n \n def transfer_completed(transfer_result, valueid, port):\n if transfer_result: # `None` if the value was already present\n transfer_results[port] = transfer_result\n return self.get_value(valueid)\n \n\n d.addCallback(transfer_completed, valueid, port)\n ports.append(port)\n transfers.append(d)\n \n d = defer.DeferredList(transfers)\n \n def run(inputs):\n \"\"\"\n Runs in separate thread.\n \"\"\"\n logger.debug(\"Running job %s\" % tick)\n \n #start = time.clock()\n start = datetime.datetime.now()\n try:\n result = task.evaluate(inputs)\n except:\n result = failure.Failure()\n finally:\n #end = time.clock()\n end = datetime.datetime.now()\n \n logger.debug(\"Running job %s finished\" % tick)\n \n #duration = end - start\n duration = (end - start).total_seconds()\n return traverser.EvalResult(result, duration)\n \n @twistit.yieldefer\n def got_all(results):\n \n logger.debug(\"Transfers for job %s finished\" % tick)\n \n values = []\n for success, result in results:\n if not success:\n if result.check(pickle.PickleError):\n raise pickle.PickleError(\"Failed to unpickle input of %r.%r: %s\" %(tick, port, result))\n else:\n result.raiseException()\n else:\n values.append(result)\n\n inputs = dict(zip(ports, values))\n \n evalresult = yield threads.deferToThread(run, inputs)\n \n if not isinstance(evalresult.result, dict) and not isinstance(evalresult.result, failure.Failure):\n raise ValueError(\"Evaluation of task %r did not produce a dict or a failure. Got %r.\" % (task, evalresult.result))\n \n defer.returnValue(evalresult)\n \n def task_completed(evalresult):\n if isinstance(evalresult.result, dict):\n \n # Injest values into our store and replace the eval results with ValueIds.\n outputs = evalresult.result\n outs = {}\n datasizes = {}\n for port, value in outputs.iteritems():\n valueid = ValueId(graph.Endpoint(tick, port))\n \n pickle_supported = True\n if nosend_ports and port in nosend_ports:\n pickle_supported = False\n \n try:\n size = self.set_value(valueid, \n value, \n pickle_supported, \n pickle_supported and fail_on_unexpected_nosend)\n except NoPickleError as e:\n e = NoPickleError(\"Value of output port %r cannot be pickled.\" % port,\n cause=e.cause)\n # TODO: memory leak. We should remove the values we've set in\n # previous loop iterations.\n raise e\n \n outs[port] = valueid\n if size is not None:\n datasizes[port] = size \n \n evalresult.result = outs\n evalresult.datasizes = datasizes\n evalresult.transfer_results = transfer_results\n return evalresult\n \n d.addCallback(got_all)\n d.addCallback(task_completed)\n return d",
"def manager(num_thrds, num_loops):\n\n\tmutex.acquire()\n\tcnt.reset()\n\tmutex.release()\n\n\t# initialize the thread pool\n\tthread_pool = []\n\n\tfor i in range(num_thrds):\n\t\tthrd = threading.Thread(target=worker, args=(num_loops, cnt))\n\t\tthread_pool.append(thrd)\n\n\t# start threads\n\tfor i in range(len(thread_pool)):\n\t\tthread_pool[i].start()\n\n\tfor i in range(len(thread_pool)):\n\t\tthreading.Thread.join(thread_pool[i])\n\n\t#cnt.display()",
"def __call__(self, q, threads = None):\n if threads is -1: threads = cpu_count()\n\n if threads is None:\n results = [self.evaluate(v) for v in q]\n elif type(threads) is int and threads > 0:\n workers = Pool(threads)\n results = workers.map(self.evaluate, q)\n else:\n raise ValueError('threads keyword must be either -1 or an integer greater than zero')\n\n mu = [ t[0] for t in results ]\n sig = [ t[1] for t in results ]\n return array(mu), array(sig)",
"def getResults(workers):\n results = []\n for worker in workers:\n results += worker.getResults()\n \n return results",
"def _passing_args_impl(self, pool_class_factory):\n DELTA = 12\n ITERATIONS = 100\n pool = pool_class_factory()\n\n pool.start(CoeffMultiplierWorker, {'coeff': DELTA})\n for i in range(ITERATIONS):\n pool.ventilate(message='Vent data {}'.format(i), value=i)\n\n all_results = [pool.get_results() for _ in range(ITERATIONS)]\n self.assertEqual({DELTA}, set(np.diff(sorted(all_results))))\n\n pool.stop()\n pool.join()",
"def receive_workers_output(node_request_map, results_list, free_nodes, command, idle_nodes):\n\n if dist.get_backend() == \"nccl\": # Async\n for node, req in node_request_map:\n if req.is_completed():\n result = build_metrics_dict(node) if command == COMMAND_TESTVAL else build_grads_dict(node)\n results_list.append(result)\n free_nodes.append(node)\n node_request_map.remove((node,req))\n print_rank(f\"Finished releasing the nodes {free_nodes}\", loglevel=logging.DEBUG)\n else: # Sync\n print_rank(f\"Waiting for a workers\", loglevel=logging.DEBUG)\n gather_objects = [(None,None,None) for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n print_rank(f\" All workers have finished ... taking the remaining clients {len(output)}\", loglevel=logging.DEBUG)\n output = [e for i,e in enumerate(output) if i not in idle_nodes ] # Cleanup for idle workers\n results_list = results_list + output[1:]\n free_nodes = list(range(1, size()))\n \n return node_request_map, results_list, free_nodes",
"def worker_run():\n while True:\n print(\"worker: waiting for numdata_lock\")\n numdata_lock.acquire()\n print(\"worker: acquired numdata_lock\")\n print(\"The number {} is spelled '{}'\".format(numdata[\"int\"],numdata[\"name\"]))\n numdata_lock.release()\n time.sleep(1)",
"def _worker(self, results):\n keys = {\n \"test-certificate-verify\": {\n \"MD5 forced\": 2,\n \"TLSv1.1 signature in TLSv1.2 Certificate Verify\": 1,\n \"MITIGATION\": \"SLOTH\",\n },\n \"test-sig-algs\": {\"MD5 first\": 2, \"MITIGATION\": \"SLOTH\"},\n \"test-clienthello-md5\": {\n \"only-md5-rsa-signature_algorithm\": 1,\n \"unknown-signature_algorithm-numbers\": 1,\n \"MITIGATION\": \"SLOTH\",\n },\n \"test-tls13-pkcs-signature\": {\n \"rsa_pkcs1_md5 signature\": 1,\n \"MITIGATION\": \"SLOTH_MD5_SIGNATURE_TLS_1_3\",\n },\n }\n return self._obtain_results(results, keys)",
"def simulation(data_size : int,nbr_file : int, path : str, target):\n res = [0 for _ in range(data_size)]\n threads = []\n for i in range(data_size):\n threads.append(Thread(target = target, args = (nbr_file,path,res,i)))\n threads[i].start()\n\n for i in range(data_size):\n threads[i].join()\n\n return res",
"def iterate_mproc_map(wrap_func, iterate_vals, nb_workers=CPU_COUNT, desc='', ordered=True):\n iterate_vals = list(iterate_vals)\n nb_workers = 1 if not nb_workers else int(nb_workers)\n nb_workers = CPU_COUNT if nb_workers < 0 else nb_workers\n\n if desc is not None:\n pbar = tqdm.tqdm(total=len(iterate_vals), desc=str('%r @%i-threads' % (desc, nb_workers)))\n else:\n pbar = None\n\n if nb_workers > 1:\n logging.debug('perform parallel in %i threads', nb_workers)\n # Standard mproc.Pool created a demon processes which can be called\n # inside its children, cascade or multiprocessing\n # https://stackoverflow.com/questions/6974695/python-process-pool-non-daemonic\n\n # pool = mproc.Pool(nb_workers)\n # pool = NonDaemonPool(nb_workers)\n pool = ProcessPool(nb_workers)\n # pool = Pool(nb_workers)\n mapping = pool.imap if ordered else pool.uimap\n else:\n logging.debug('perform sequential')\n pool = None\n mapping = map\n\n for out in mapping(wrap_func, iterate_vals):\n pbar.update() if pbar else None\n yield out\n\n if pool:\n pool.close()\n pool.join()\n pool.clear()\n\n pbar.close() if pbar else None",
"def reduce_run():",
"def run_numbers():\n if run_nos:\n # Get task names\n tasks = []\n for rn in dcm_dict.keys():\n tasks.append(dcm_dict[rn]['task_name'])\n # Assign run numbers\n for tsk in set(tasks):\n n_runs = sum(i == tsk for i in tasks)\n if n_runs == 1:\n for rn in dcm_dict.keys():\n if dcm_dict[rn]['task_name'] == tsk:\n # Add in the 'task' prefix required by BIDS format if missing from name\n if not tsk[0:4] == 'task':\n dcm_dict[rn]['out_name'] = 'task-'+tsk+'_run-01'\n else:\n dcm_dict[rn]['out_name'] = tsk+'_run-01'\n elif n_runs > 1:\n task_runs = []\n run_times = []\n for rn in dcm_dict.keys():\n if dcm_dict[rn]['task_name'] == tsk:\n task_runs.append(rn)\n run_times.append(dcm_dict[rn]['start_time'].timestamp())\n idx_order = sorted(range(len(run_times)), key=lambda k: run_times[k])\n for i in idx_order:\n if not tsk[0:4] == 'task':\n dcm_dict[task_runs[i]]['out_name'] = 'task-'+tsk+'_run-0'+str(i+1)\n else:\n dcm_dict[task_runs[i]]['out_name'] = tsk+'_run-0'+str(i+1)\n else:\n for rn in dcm_dict.keys():\n dcm_dict[rn]['out_name'] = dcm_dict[rn]['task_name']",
"def exec(list_req, wb,write,Total):\n ret = None\n\n if write==True:\n for tick in list_req:\n retrieve_score(wb,tick,increase=True,write = write)\n retrieve_score(wb,tick,increase=False,write = write) \n \n else:\n if Total == True:\n ret_inc = retrieve_score(wb,list_req[0],increase=True,write = write)\n ret_score = retrieve_score(wb,list_req[0],increase=False,write = write)\n for tick in list_req[1:]:\n ret_inc = ret_inc.append(retrieve_score(wb,tick,increase=True,write = write))\n ret_score = ret_score.append(retrieve_score(wb,tick,increase=False,write = write))\n \n else:\n ret_inc = []\n ret_score = []\n for tick in list_req[1:]:\n ret_inc.append(retrieve_score(wb,tick,increase=True,write = write))\n ret_score.append(retrieve_score(wb,tick,increase=False,write = write))\n\n\n ret = (ret_score,ret_inc)\n\n \n return ret",
"def stats_freq():\n\n # Get a worker number to position the progress bar\n global idxQueue\n thr_idx = idxQueue.get()\n\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} stats_freq()\")\n\n # Initialize a Counter object for each family\n freqs = {}\n for f in famlist:\n freqs[f] = Counter()\n\n # List all nt_names happening within a RNA family and store the counts in the Counter\n for f in tqdm(famlist, position=thr_idx+1, desc=f\"Worker {thr_idx+1}: Base frequencies\", unit=\"family\", leave=False):\n with sqlite3.connect(runDir + \"/results/RNANet.db\") as conn:\n conn.execute('pragma journal_mode=wal')\n counts = dict(sql_ask_database(conn, f\"SELECT nt_name, COUNT(nt_name) FROM (SELECT chain_id from chain WHERE rfam_acc='{f}') NATURAL JOIN nucleotide GROUP BY nt_name;\", warn_every=0))\n freqs[f].update(counts)\n \n # Create a pandas DataFrame, and save it to CSV.\n df = pd.DataFrame()\n for f in tqdm(famlist, position=thr_idx+1, desc=f\"Worker {thr_idx+1}: Base frequencies\", unit=\"family\", leave=False):\n tot = sum(freqs[f].values())\n df = pd.concat([ df, pd.DataFrame([[ format_percentage(tot, x) for x in freqs[f].values() ]], columns=list(freqs[f]), index=[f]) ])\n df = df.fillna(0)\n df.to_csv(runDir + \"/results/frequencies.csv\") \n idxQueue.put(thr_idx) # replace the thread index in the queue\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} finished\")\n # notify(\"Saved nucleotide frequencies to CSV file.\")",
"def compute_metrics(self, results: list) -> dict:\n dump(results, self.out_file_path)\n print_log(\n f'Results has been saved to {self.out_file_path}.',\n logger='current')\n return {}",
"def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)",
"def RUN(numTrials, rateMap, numPhotons=48, angularSize=10.0, outputSize=300, mcList='MCOut.pickle',HESS=False, Sig = -1 ,numProcs = 10):\r\n print 'Beginning MC Series\\nProgress'\r\n \r\n import FermiPSF, ParseFermi\r\n mcOut = []\r\n map = pickle.load(open(rateMap, \"r\" )) # load rate-map\r\n PSFTableFront = FermiPSF.PSF_130(convType='front') # load PSF front converting\r\n PSFTableBack = FermiPSF.PSF_130(convType='back') # load PSF back converting\r\n\r\n start = time.time();\r\n \r\n ppa = outputSize/angularSize # pixel per degree\r\n\r\n # Import background template\r\n bgmap = 'BGRateMap.pickle'\r\n if (HESS == True):\r\n bgmap = 'BGRateMap_HESS_2_deg.pickle'\r\n \r\n bgTemplate = pickle.load(open(bgmap , \"r\" ))\r\n \r\n mcOut = np.zeros(numTrials)\r\n p = pool.Pool(numProcs)\r\n \r\n partial_MC_THREAD = partial( MC_THREAD, map = map,bgTemplate=bgTemplate,PSFTableFront=PSFTableFront, PSFTableBack=PSFTableBack, HESS=HESS, angularSize=angularSize, numPhotons=numPhotons, outputSize=outputSize,Sig = Sig)\r\n mcOut = p.map(partial_MC_THREAD, mcOut)\r\n \r\n# for i in range(numTrials): \r\n# # Build the background \r\n## background = Build_Background_Sideband(bgMean, lowSideband, highSideband, PSFTable)\r\n# background = Build_Background_Template(bg, bgTemplate, PSFTableFront, PSFTableBack,flatLevel = 0.0,HESS= HESS,angularSize = angularSize)\r\n# # Compute number of source photons\r\n# numMC = numPhotons - len(background[0])\r\n# # Run MC for source photons \r\n# data = MC(map,numMC,angularSize,outputSize,PSFTableFront, PSFTableBack,HESS=HESS)\r\n# # Append data\r\n# mcOut.append((data[0]+background[0], data[1]+background[1]))\r\n# \r\n# # Compute Speed Statistics\r\n# sys.stdout.write('\\r' + str(i+1)+'/'+str(numTrials)) \r\n# sys.stdout.flush()\r\n elapsed = time.time()-start;\r\n if (elapsed != 0.0):\r\n print '\\nSimulations Completed in', elapsed, 's', '(',numTrials/elapsed, ' sims per second)'\r\n \r\n outFile = open(mcList, \"wb\" )\r\n pickle.dump(mcOut, outFile)\r\n print 'Results saved to ', mcList\r\n return mcOut",
"def solution(nums):\n solution = Solution()\n output = solution.threeSum(nums)\n\n print(output)"
] | [
"0.79976565",
"0.7521205",
"0.56689334",
"0.56155014",
"0.5509319",
"0.5476741",
"0.53603804",
"0.53469855",
"0.53294677",
"0.53245115",
"0.53092897",
"0.53065777",
"0.53054893",
"0.5302376",
"0.5263724",
"0.5257976",
"0.51859593",
"0.5184734",
"0.5166715",
"0.51412565",
"0.5122664",
"0.51180834",
"0.5105005",
"0.50843304",
"0.5064053",
"0.5057585",
"0.50565886",
"0.5044967",
"0.5044508",
"0.5011129"
] | 0.8538493 | 0 |
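The query/document pair in the record above references a `factorize_naive` helper that the snippet never defines. Below is a runnable sketch of the same threaded pattern; the trial-division body of `factorize_naive` and the chunking in `__main__` are assumptions added for illustration, not part of the original record:

```python
import os
import threading

def factorize_naive(n):
    # Hypothetical stand-in: trial division, returns the prime factors of n.
    factors = []
    d = 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

def worker(nums, outdict):
    # Mirrors the document: each thread writes its results into a shared dict.
    print(threading.current_thread().name, "pid:", os.getpid())
    for n in nums:
        outdict[n] = factorize_naive(n)

if __name__ == "__main__":
    results = {}
    nums = list(range(2, 40))
    chunks = [nums[i::4] for i in range(4)]  # split the work across 4 threads
    threads = [threading.Thread(target=worker, args=(c, results)) for c in chunks]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(results[36])  # [2, 2, 3, 3]
```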
The worker function, invoked in a process. 'nums' is a list of numbers to factor. The results are placed in a dictionary that's pushed to a queue. | def worker(nums, out_q):
outdict = {}
print(threading.current_thread().name)
print ("pid:", os.getpid())
print ("data size:", nums)
for n in nums:
outdict[n] = factorize_naive(n)
out_q.put(outdict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def worker(nums, outdict):\n print(threading.current_thread().name)\n print (\"pid:\", os.getpid())\n for n in nums:\n outdict[n] = factorize_naive(n)",
"def worker(nums, outdict):\n for n in nums:\n outdict[n] = primes2(n)",
"def worker(file_paths, out_queue):\n\t\toutdict = {}\n\t\tfor path in file_paths:\n\t\t\toutdict[n] = run_muscle(path)\n\t\tout_queue.put(outdict)",
"def process_results(refresh_count, output_dir, ext_queue, result_queue,\n num_of_workers=8):\n workers_dict = {} # keep track of worker processes\n input_queue = Queue() # asynchronously feed workers task to do \n worker_output_queue = Queue() # output queue from workers\n ack_queue = Queue()\n bug_dict = {} # dict to keep track of how many duplicates of each bug, if\n # exists\n try:\n # separate the non-ads from the ads for ease of handchecking\n os.makedirs(output_dir)\n os.makedirs(os.path.join(output_dir, 'notad'))\n except OSError:\n # Directory is created, Okay to pass\n pass\n\n for i in range(num_of_workers):\n p = Process(target=curl_worker, args=(output_dir, input_queue,\\\n worker_output_queue, i, ack_queue))\n p.start()\n workers_dict[i] = p\n # uses a pool nodesurl' workers\n # curl_worker_pool = Pool(processes=8)\n # manager = Manager()\n # curl_result_queue = manager.Queue()\n \n dl_counter = 0 # keep track of how many bugs downloaded\n while True:\n try:\n found_bugs = json.loads(ext_queue.get(block=True, timeout=2))\n except Exception:\n LOG.debug('No more bugs found, break out of queue')\n break\n\n for entry in found_bugs:\n bug = parse_buginfo(entry)\n try:\n # matched an entry in the bugdict, incr count and continue\n bug_dict[bug] += 1\n continue\n except KeyError:\n bug_dict[bug] = 1 \n\n try:\n saved_location ='Visit%d_%s%d' % (refresh_count, bug.get_name(), dl_counter)\n dl_counter += 1\n save_to_path = os.path.join( output_dir, '%s' % saved_location)\n input_queue.put((saved_location, save_to_path, bug))\n except Exception as e:\n LOG.exception('%s' % e)\n\n for i in range(num_of_workers):\n # send stop signal\n input_queue.put((\"STOP\",))\n \n stopped = 0\n while stopped < len(workers_dict):\n ack = ack_queue.get()\n p = workers_dict[ack]\n p.join(timeout=1)\n if p.is_alive():\n p.terminate()\n LOG.debug('terminating process %d' % ack)\n stopped += 1\n \n while not worker_output_queue.empty():\n # receive results from the worker\n cbug = worker_output_queue.get()\n # ugly code here\n bugcount = bug_dict[cbug]\n del bug_dict[cbug]\n bug_dict[cbug] = bugcount\n\n with open( os.path.join(output_dir, 'bug_dict%d.pkl' % refresh_count), 'w') as fwtr:\n cPickle.dump(bug_dict, fwtr)\n result_queue.put(bug_dict)\n return",
"def putting_on_queue(*args):\n results.put(main_func(*args))",
"def __init__ (self, *funcs_workers):\n self.numpools = len(funcs_workers)\n self.numworkerslist = []\n self.queues = [Queue() for _ in xrange(self.numpools+1)]\n for i, (func, numworkers) in enumerate(funcs_workers):\n self.numworkerslist.append(numworkers)\n for _ in xrange(numworkers):\n Process(target=worker, args=(\n func, self.queues[i], self.queues[i+1]\n )).start()",
"def sweep_multiprocessing(self,sweep_n,start,end,points,filename='./test.txt'):\n ###############################\n ##multiprocessing preparation\n ##############################\n core = 10\n points = points//core*core # points per thread\n self.result = [[0.0 for i in range(self.n+1)]for j in range(points)]#this is the matrix which store the result, it will be saved to file later.\n job = self.allocate_job(start,end,points,core)\n\n \n ################################\n ##This are codes for progress bar\n ###############################\n prog = ProgressBar(0, points, 50, mode='fixed', char='#')\n ##the linear algebra start here\n a = np.zeros(self.N)\n a[self.N-1] = 1 #1 because rho_11+rho_22 ... =1\n a = np.matrix(a)\n a = a.T\n\n done_queue = multiprocessing.Queue()\n process_list = []\n for x in range(core):\n process_list.append(multiprocessing.Process(target = sweep_mp,args = (job[x],self.system,self.nu2,a,self.add_freq,self.index,sweep_n,self.n,done_queue)))\n\n tStart = time.time()\n print 'start'\n for p in process_list:\n p.start()\n\n stop_num = 0\n while stop_num != core:\n a = done_queue.get()\n if a == 'STOP':\n stop_num += 1\n else:\n self.result[a[0]] = a[1]\n prog.increment_amount()\n print prog, '\\r',\n sys.stdout.flush()\n\n print '\\n'\n for p in process_list:\n p.join()\n print \"%s.exitcode = %s\" %(p.name, p.exitcode)\n\n tStop = time.time()\n print\"spend\",(tStop - tStart),\"second\"\n \n self.sweep_save_file(filename,points)",
"def worker(self, q, return_dict):\n pid = os.getpid()\n while True:\n qqq = q.get()\n if qqq == 'DONE':\n # print('proc =', os.getpid())\n break\n\n (idx, d) = qqq\n mol_id = d[0]\n smi = d[1]\n # print screening processing in every pout step\n if self.pout != 0:\n if idx % self.pout == self.pout-1:\n print(\"processing: \", idx+1, flush=True)\n result_dict = self.simulation_process(idx, mol_id, smi, pid)\n return_dict[idx] = result_dict",
"def compute(args, fun, max_workers=6):\n print(\"\\nProcessing symbols in parallel\")\n ex = futures.ThreadPoolExecutor(max_workers=max_workers)\n ex.map(fun, args)",
"def multiprocess(inputs: list, worker_class: Any, num_threads: int = 40):\n\n input_queue = Queue() # type: ignore\n output_queue = Queue() # type: ignore\n\n for input_elm in inputs:\n input_queue.put(input_elm)\n\n threads = [worker_class(input_queue, output_queue)\n for _ in range(num_threads)]\n \n for thread in threads:\n thread.start()\n \n for thread in threads:\n thread.join()\n\n return get_all_nowait(output_queue)",
"def worker_func(worker_id, w2t_m_queue, events, t2w_d_manager):\n average_iteration_time = 0\n worker_nn = create_neural_network()\n iteration_time = time.time()\n for i in range(ITERATIONS):\n data_point = create_data_point(worker_nn)\n events[\"Workers_can_proceed\"].clear()\n w2t_m_queue.put(data_point)\n # Signal trainer that this worker has placed its data point this iteration\n events[worker_id].set()\n average_iteration_time += (time.time() - iteration_time)\n # Have worker wait until trainer is done processing this iteration\n events[\"Workers_can_proceed\"].wait()\n iteration_time = time.time()\n # Obtain data trainer has placed into shared manager (data is weights of network)\n shared_data = t2w_d_manager[0]\n worker_nn.set_weights(shared_data)\n\n average_iteration_time /= ITERATIONS\n print(\"Worker \" + str(worker_id) + \" average put time: \" + str.format('{0:.6f}', (average_iteration_time*1000)) + \"ms\")",
"def worker_run():\n while True:\n print(\"worker: waiting for numdata_lock\")\n numdata_lock.acquire()\n print(\"worker: acquired numdata_lock\")\n print(\"The number {} is spelled '{}'\".format(numdata[\"int\"],numdata[\"name\"]))\n numdata_lock.release()\n time.sleep(1)",
"def _process_worker(call_queue, result_queue):\n while True:\n call_item = call_queue.get(block=True)\n if call_item is None:\n # Wake up queue management thread\n result_queue.put(os.getpid())\n return\n try:\n r = call_item.fn(*call_item.args, **call_item.kwargs)\n except BaseException as e:\n exc = _ExceptionWithTraceback(e, e.__traceback__)\n result_queue.put(_ResultItem(call_item.work_id, exception=exc))\n logger.exception(e) # 主要是直接显示错误。\n else:\n result_queue.put(_ResultItem(call_item.work_id,\n result=r))",
"def worker_function(taskQ, resultQ):\n \n while True:\n try: ivel = taskQ.get(block=True, timeout=10)# try to get the next task, allow some time for process clash (ivel number)\n except queue.Empty: break# kill process if no more tasks left\n example = generate_example(ivel)\n resultQ.put(example)# push the example to the results queue",
"def _process_data(f, work_queue, results_queue):\n for element in iter(work_queue.get, FINISHED):\n try:\n results_queue.put(f(element))\n except Exception, work_error:\n LOG.critical('parallel_pc Error: {0}\\n\\n\\tconfig settings {1}\\n'.format(work_error, element))\n results_queue.put(FINISHED)",
"def main(config):\n all_procs = []\n result_q = mp.Queue()\n for seed in config[\"seeds\"]:\n config[\"seed\"] = seed\n p = mp.Process(target=run, args=(config, result_q))\n p.start()\n all_procs.append(p)\n\n for p in all_procs:\n p.join()\n\n all_returns = [result_q.get() for p in all_procs]\n mean_per_restart = np.mean(all_returns, axis=1)\n mean, std = np.mean(mean_per_restart), np.std(mean_per_restart)\n\n # Return the negative since we're minimizing the function\n # .. the metric minimized is suggested from Duan et al. (2016)\n return -(mean - std)",
"def worker1() -> None:\n x = 10\n while x > 0:\n logging.info('Info from Process1 {0}'.format(x))\n time.sleep(0.25)\n x -= 1",
"def mock_workers(task, num_workers):\n results = [\n [{\n \"name\": \"tweet\",\n \"value\": \"%d. Trump Trump everywhere not a Hillary to see.\" % x\n }] for x in range(num_workers)]\n return results",
"def mprocessing(nprocs, lockdb, running, mutex, itemslist, a_fn, cur):\n # proc_pool = Local variable proc_pool for Pool of processes\n # log_level = log_level\n # count_total = Total counter of items to distribute/play/indicate progress\n # len(itemslist)\n\n log_level = logging.getLogger().getEffectiveLevel()\n logging.info('===mprocessing [%s] target_fn():[%s] nprocs:[%s]',\n __name__, a_fn.__name__, nprocs)\n # if log_level <= logging.WARNING:\n # if args is not None:\n # for i, arg in enumerate(args):\n # logging.info('===mprocessing f():[%s] arg[%s]={%s}',\n # a_fn.__name__, i, arg)\n\n # if __name__ == '__main__':\n logging.debug('===Multiprocessing=== Setting up logger!')\n # CODING No need for such low level debugging to stderr\n # multiprocessing.log_to_stderr()\n logger = multiprocessing.get_logger()\n logger.setLevel(log_level)\n\n logging.debug('===Multiprocessing=== Logging defined!')\n\n # ---------------------------------------------------------\n # chunk\n #\n # Divides an iterable in slices/chunks of size size\n #\n def chunk(iter_list, size):\n \"\"\"\n Divides an iterable in slices/chunks of size size\n\n >>> for a in chunk([ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3):\n ... len(a)\n 3\n 3\n 3\n 1\n \"\"\"\n iter_list = iter(iter_list)\n # lambda: creates a returning expression function\n # which returns slices\n # iter, with the second argument () stops creating\n # iterators when it reaches the end\n return iter(lambda: tuple(islice(iter_list, size)), ())\n\n proc_pool = []\n lockdb = multiprocessing.Lock()\n running = multiprocessing.Value('i', 0)\n mutex = multiprocessing.Lock()\n count_total = len(itemslist)\n\n size = (len(itemslist) // int(nprocs)) \\\n if ((len(itemslist) // int(nprocs)) > 0) \\\n else 1\n\n logging.debug('len(itemslist):[%s] int(nprocs):[%s] size per process:[%s]',\n len(itemslist), int(nprocs), size)\n\n # Split itemslist in chunks to distribute accross Processes\n for splititemslist in chunk(itemslist, size):\n logging.warning('===Actual/Planned Chunk size: [%s]/[%s]',\n len(splititemslist), size)\n logging.debug('===type(splititemslist)=[%s]', type(splititemslist))\n logging.debug('===Job/Task Process: Creating...')\n proc_task = multiprocessing.Process(\n target=a_fn, # argument function\n args=(lockdb,\n running,\n mutex,\n splititemslist,\n count_total,\n cur,))\n proc_pool.append(proc_task)\n logging.debug('===Job/Task Process: Starting...')\n proc_task.start()\n NPR.niceprint('===Job/Task Process: [{!s}] Started '\n 'with pid:[{!s}]'\n .format(proc_task.name,\n proc_task.pid),\n verbosity=3,\n logalso=logging.DEBUG)\n\n # Check status of jobs/tasks in the Process Pool\n if log_level <= logging.DEBUG:\n NPR.niceprint('===Checking Processes launched/status:',\n verbosity=3, logalso=logging.DEBUG)\n for j in proc_pool:\n NPR.niceprint('{!s}.is_alive = {!s}'.format(j.name, j.is_alive()),\n verbosity=3, logalso=logging.DEBUG)\n\n # Regularly print status of jobs/tasks in the Process Pool\n # Prints status while there are processes active\n # Exits when all jobs/tasks are done.\n while True:\n if not any(multiprocessing.active_children()):\n logging.debug('===No active children Processes.')\n break\n for prc in multiprocessing.active_children():\n logging.debug('===%s.is_alive = %s', prc.name, prc.is_alive())\n proc_task_active = prc\n NPR.niceprint('===Will wait for 60 on {!s}.is_alive = {!s}'\n .format(proc_task_active.name,\n proc_task_active.is_alive()),\n verbosity=3, logalso=logging.INFO)\n\n proc_task_active.join(timeout=60)\n 
NPR.niceprint('===Waited for 60s on '\n '{!s}.is_alive = {!s}'\n .format(proc_task_active.name,\n proc_task_active.is_alive()),\n verbosity=3, logalso=logging.INFO)\n\n # Wait for join all jobs/tasks in the Process Pool\n # All should be done by now!\n for j in proc_pool:\n j.join()\n NPR.niceprint('==={!s} (is alive: {!s}).exitcode = {!s}'\n .format(j.name, j.is_alive(), j.exitcode),\n verbosity=2)\n\n logging.warning('===Multiprocessing=== pool joined! '\n 'All processes finished.')\n\n # Will release (set to None) the lockdb lock control\n # this prevents subsequent calls to\n # use_lock( nuLockDB, False)\n # to raise exception:\n # ValueError('semaphore or lock released too many times')\n logging.info('===Multiprocessing=== pool joined! '\n 'Is lockdb None? [%s]. Setting lockdb to None anyhow.',\n lockdb is None)\n lockdb = None\n\n # Show number of total files processed\n NPR.niceprocessedfiles(running.value, count_total, True)\n\n return True",
"def start_workers(w2t_m_queue, events, t2w_d_manager):\n start_time = time.time()\n print(\"*********************************************************************\")\n print(\"Initializing workers...\")\n workers = []\n for i in range(NUM_WORKERS):\n worker = mp.Process(target=worker_func, args=(i, w2t_m_queue, events, t2w_d_manager))\n worker.start()\n workers.append(worker)\n print(\"Workers initialized.\")\n print(\"Initialization time elapsed: \" + str.format('{0:.6f}', (time.time() - start_time)*1000) + \"ms\")\n print(\"*********************************************************************\")\n return workers",
"def evaluate(self, tick, task, inputs, nosend_ports=None, fail_on_unexpected_nosend=False):\n\n logger.debug(\"Transfers for job %s\" % tick)\n\n ports = []\n transfers = []\n transfer_results = {}\n for port, (valueid, worker) in inputs.iteritems():\n \n \n d = self.fetch_from(worker, valueid)\n \n def transfer_completed(transfer_result, valueid, port):\n if transfer_result: # `None` if the value was already present\n transfer_results[port] = transfer_result\n return self.get_value(valueid)\n \n\n d.addCallback(transfer_completed, valueid, port)\n ports.append(port)\n transfers.append(d)\n \n d = defer.DeferredList(transfers)\n \n def run(inputs):\n \"\"\"\n Runs in separate thread.\n \"\"\"\n logger.debug(\"Running job %s\" % tick)\n \n #start = time.clock()\n start = datetime.datetime.now()\n try:\n result = task.evaluate(inputs)\n except:\n result = failure.Failure()\n finally:\n #end = time.clock()\n end = datetime.datetime.now()\n \n logger.debug(\"Running job %s finished\" % tick)\n \n #duration = end - start\n duration = (end - start).total_seconds()\n return traverser.EvalResult(result, duration)\n \n @twistit.yieldefer\n def got_all(results):\n \n logger.debug(\"Transfers for job %s finished\" % tick)\n \n values = []\n for success, result in results:\n if not success:\n if result.check(pickle.PickleError):\n raise pickle.PickleError(\"Failed to unpickle input of %r.%r: %s\" %(tick, port, result))\n else:\n result.raiseException()\n else:\n values.append(result)\n\n inputs = dict(zip(ports, values))\n \n evalresult = yield threads.deferToThread(run, inputs)\n \n if not isinstance(evalresult.result, dict) and not isinstance(evalresult.result, failure.Failure):\n raise ValueError(\"Evaluation of task %r did not produce a dict or a failure. Got %r.\" % (task, evalresult.result))\n \n defer.returnValue(evalresult)\n \n def task_completed(evalresult):\n if isinstance(evalresult.result, dict):\n \n # Injest values into our store and replace the eval results with ValueIds.\n outputs = evalresult.result\n outs = {}\n datasizes = {}\n for port, value in outputs.iteritems():\n valueid = ValueId(graph.Endpoint(tick, port))\n \n pickle_supported = True\n if nosend_ports and port in nosend_ports:\n pickle_supported = False\n \n try:\n size = self.set_value(valueid, \n value, \n pickle_supported, \n pickle_supported and fail_on_unexpected_nosend)\n except NoPickleError as e:\n e = NoPickleError(\"Value of output port %r cannot be pickled.\" % port,\n cause=e.cause)\n # TODO: memory leak. We should remove the values we've set in\n # previous loop iterations.\n raise e\n \n outs[port] = valueid\n if size is not None:\n datasizes[port] = size \n \n evalresult.result = outs\n evalresult.datasizes = datasizes\n evalresult.transfer_results = transfer_results\n return evalresult\n \n d.addCallback(got_all)\n d.addCallback(task_completed)\n return d",
"def parallel_work(jobs, nr_of_threads):\n work_queue = Queue()\n result_queue = Queue()\n result = {}\n\n for job in jobs:\n work_queue.put(job)\n\n if nr_of_threads > len(jobs):\n nr_of_threads = len(jobs)\n\n for i in range(nr_of_threads):\n worker = Process(target=check_plugin, args=(work_queue,result_queue))\n worker.start()\n\n while len(result.keys()) < len(jobs):\n data = result_queue.get()\n\n if \" | \" in data[1]:\n (status, output) = data[1].split(\" | \")\n else:\n status = \"UNKNOWN\"\n output = data[1]\n\n result[data[0]] = {\"status\": status, \"output\": output}\n #print \"Host \" + data[0] + \" \" + status\n\n return result",
"def stats_freq():\n\n # Get a worker number to position the progress bar\n global idxQueue\n thr_idx = idxQueue.get()\n\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} stats_freq()\")\n\n # Initialize a Counter object for each family\n freqs = {}\n for f in famlist:\n freqs[f] = Counter()\n\n # List all nt_names happening within a RNA family and store the counts in the Counter\n for f in tqdm(famlist, position=thr_idx+1, desc=f\"Worker {thr_idx+1}: Base frequencies\", unit=\"family\", leave=False):\n with sqlite3.connect(runDir + \"/results/RNANet.db\") as conn:\n conn.execute('pragma journal_mode=wal')\n counts = dict(sql_ask_database(conn, f\"SELECT nt_name, COUNT(nt_name) FROM (SELECT chain_id from chain WHERE rfam_acc='{f}') NATURAL JOIN nucleotide GROUP BY nt_name;\", warn_every=0))\n freqs[f].update(counts)\n \n # Create a pandas DataFrame, and save it to CSV.\n df = pd.DataFrame()\n for f in tqdm(famlist, position=thr_idx+1, desc=f\"Worker {thr_idx+1}: Base frequencies\", unit=\"family\", leave=False):\n tot = sum(freqs[f].values())\n df = pd.concat([ df, pd.DataFrame([[ format_percentage(tot, x) for x in freqs[f].values() ]], columns=list(freqs[f]), index=[f]) ])\n df = df.fillna(0)\n df.to_csv(runDir + \"/results/frequencies.csv\") \n idxQueue.put(thr_idx) # replace the thread index in the queue\n setproctitle(f\"RNANet statistics.py Worker {thr_idx+1} finished\")\n # notify(\"Saved nucleotide frequencies to CSV file.\")",
"def Worker(queue, out_queue):\n while not queue.empty() and Worker.running:\n item = queue.get(False)\n if not item:\n break\n results = RunGCC(item[0], item[1])\n out_queue.put(results)",
"def __call__(self, q, threads = None):\n if threads is -1: threads = cpu_count()\n\n if threads is None:\n results = [self.evaluate(v) for v in q]\n elif type(threads) is int and threads > 0:\n workers = Pool(threads)\n results = workers.map(self.evaluate, q)\n else:\n raise ValueError('threads keyword must be either -1 or an integer greater than zero')\n\n mu = [ t[0] for t in results ]\n sig = [ t[1] for t in results ]\n return array(mu), array(sig)",
"def process():",
"def worker_func(queue_in, queue_out, model_type, hidden_size, novelty_use, env_name, noise_std, action_type):\r\n env = gym.make(env_name)\r\n \r\n cache = {} # to store population / networks\r\n \r\n while True:\r\n parents_seeds = queue_in.get()\r\n if parents_seeds == None:\r\n break\r\n new_cache = {}\r\n # for each network seeds \r\n for seeds in parents_seeds:\r\n # if seed history exist\r\n if len(seeds) > 1:\r\n net = cache.get(seeds[:-1])#\r\n # check if network already exists\r\n if net is not None:\r\n # if exist mutate on the new given seed -> the last in the list\r\n net = mutate(net, seeds[-1], noise_std)\r\n else:\r\n # if not exist build the net with the seed history\r\n net = build_net(env, seeds, model_type, hidden_size, noise_std, action_type)\r\n else:\r\n # since no seed history exist -> build network\r\n net = build_net(env, seeds, model_type, hidden_size, noise_std, action_type)\r\n \r\n # saves the networks in a cache \r\n new_cache[seeds] = net\r\n # evaluate new network mutation\r\n reward, steps, bc = evaluate(env, net)\r\n queue_out.put(OutputItem(seeds=seeds, reward=reward, steps=steps, bc=bc))\r\n # after evaluating all seeds the worker sets the new_cache with saved nets to the current cache\r\n cache = new_cache",
"def worker(my_idx, inq, outq):\n print(\"worker %d: starting\" % my_idx)\n backoff = .001\n while True:\n cmd = inq.get()\n if cmd is None:\n break\n ridx, creds, cmds = cmd\n backoff = max(backoff / 2, 0.001)\n while True:\n try:\n responses = Gmail.batch_executor(creds, cmds)\n except Gmail.UserRateException:\n print(f'worker {my_idx}: backoff {backoff} sec')\n sleep(backoff)\n backoff = min(backoff * 2, 1.0)\n except Exception as ex:\n outq.put([ridx, ex])\n break\n else:\n outq.put([ridx, responses])\n break\n inq.task_done()\n print(\"worker %d stoping\" % my_idx)",
"def worker(**kwargs):\n\t\tident = kwargs[\"ident\"]\n\t\twhile True:\n\t\t\titem = worker_queue.get()\n\t\t\tif item is None:\n\t\t\t\tbreak\n\t\t\tworker_function(ident, item)\n\t\t\tworker_queue.task_done()",
"def _create_workers(self, start=True):\n\n bearer = api_client.get_bearer_token()\n account = api_client.account_id_from_jwt(bearer.value)\n LOGGER.info(\"account: %s\", account)\n\n project = self.args.get(\"project\") or None\n LOGGER.info(\"project: %s\", project)\n\n location = self.args.get(\"location\") or None\n LOGGER.info(\"location: %s\", location)\n\n thread_count = self.args.get(\"thread_count\") or 1\n LOGGER.info(\"thread_count: %s\", thread_count)\n\n # CREATE WORKER PROCESSES\n workers = {}\n\n # Create DownloadWorker processes\n for _ in range(thread_count):\n\n # Create a process-safe run_state object for controlling process\n # run_state = multiprocessing.Array('c', \"stoppingorstuff\")\n global RUN_STATE\n wrk = UploaderWorker(\n RUN_STATE,\n self._results_queue,\n account=account,\n project=project,\n location=location)\n workers[wrk] = RUN_STATE\n\n log_history_wrk = self.create_log_history()\n\n workers[log_history_wrk] = RUN_STATE\n\n if start:\n for wrkr in workers:\n wrkr.start()\n time.sleep(.5)\n\n return workers"
] | [
"0.7811158",
"0.7189279",
"0.5911946",
"0.58161217",
"0.5799994",
"0.5654354",
"0.56341344",
"0.5627689",
"0.55830336",
"0.55784506",
"0.5456631",
"0.54565054",
"0.5428179",
"0.53641945",
"0.5327899",
"0.53268",
"0.53215057",
"0.5303377",
"0.527345",
"0.5256457",
"0.52415097",
"0.5232646",
"0.52181774",
"0.52178943",
"0.5192748",
"0.5176073",
"0.5170956",
"0.5165329",
"0.51647747",
"0.51600015"
] | 0.7874289 | 0 |
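The record above shows the multiprocessing variant: each worker builds a local dict and pushes it onto a shared queue. A minimal end-to-end sketch of that pattern follows; the chunking scheme and the body of `factorize_naive` are assumptions added for illustration, not part of the original record:

```python
import multiprocessing as mp

def factorize_naive(n):
    # Hypothetical helper, same trial-division stand-in as in the threaded example.
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors

def worker(nums, out_q):
    # Each process builds its own dict, then ships it back over the queue.
    outdict = {n: factorize_naive(n) for n in nums}
    out_q.put(outdict)

if __name__ == "__main__":
    nums = list(range(2, 100))
    nprocs = 4
    out_q = mp.Queue()
    chunks = [nums[i::nprocs] for i in range(nprocs)]
    procs = [mp.Process(target=worker, args=(c, out_q)) for c in chunks]
    for p in procs:
        p.start()
    results = {}
    for _ in procs:
        results.update(out_q.get())  # drain the queue before join to avoid deadlock
    for p in procs:
        p.join()
    print(len(results))  # 98
```

Draining the queue before calling `join()` matters here: a child process cannot exit while its queued data is still unflushed, so joining first can deadlock.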
Merge a sequence of operations into a crossproduct tree. | def merge(from_args):
assert len(from_args) > 0
def cross(x, y):
return algebra.CrossProduct(x, y)
from_ops = from_args.values()
op = reduce(cross, from_ops)
return (op, __calculate_offsets(from_args)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compose(*ops):\n if len(ops) == 0:\n return [0, 1, 2, 3, 4, 5, 6, 7]\n if len(ops) == 1:\n return ops[0]\n if len(ops) == 2:\n op1, op2 = ops\n return [op2[op1[v]] for v in range(8)]\n op1 = ops[0]\n rest = ops[1:]\n return compose(op1, compose(*rest))",
"def cartesianproduct(lists):\r\n return reduce(appendEs2Sequences,lists,[])",
"def FO_Operation_Product(operations, d_universes):\n @FO_Operation_decorator(list(product(*d_universes)), operations[0].arity())\n def product_op(*args):\n result = []\n for i, t in enumerate(zip(*args)):\n result.append(operations[i](*t))\n return tuple(result)\n\n return product_op",
"def _product(self, args):\n pools = map(tuple, args) #within original version args defined as *args\n result = [[]]\n for pool in pools:\n result = [x + [y] for x in result for y in pool]\n return result",
"def flatten(self):\n to_remove = []\n for elem in self.operands:\n # if element belong to same class (nested And's, Or's)\n if isinstance(elem, self.__class__):\n # recursive flattening first\n elem.flatten()\n # remove from current list\n to_remove.append(elem)\n\n # add new elements\n for elem in to_remove:\n self.operands.remove(elem)\n self.operands.extend(elem.operands)",
"def _operation_tree(self):\n\n # initial state\n i = 0\n level = 0\n stack = []\n current = None\n\n def _create_operation(args):\n profile_stats = None\n name = args[0].strip()\n args.pop(0)\n if len(args) > 0 and \"Records produced\" in args[-1]:\n records_produced = int(\n re.search(\"Records produced: (\\\\d+)\", args[-1]).group(1)\n )\n execution_time = float(\n re.search(\"Execution time: (\\\\d+.\\\\d+) ms\", args[-1]).group(1)\n )\n profile_stats = ProfileStats(records_produced, execution_time)\n args.pop(-1)\n return Operation(\n name, None if len(args) == 0 else args[0].strip(), profile_stats\n )\n\n # iterate plan operations\n while i < len(self.plan):\n current_op = self.plan[i]\n op_level = current_op.count(\" \")\n if op_level == level:\n # if the operation level equal to the current level\n # set the current operation and move next\n child = _create_operation(current_op.split(\"|\"))\n if current:\n current = stack.pop()\n current.append_child(child)\n current = child\n i += 1\n elif op_level == level + 1:\n # if the operation is child of the current operation\n # add it as child and set as current operation\n child = _create_operation(current_op.split(\"|\"))\n current.append_child(child)\n stack.append(current)\n current = child\n level += 1\n i += 1\n elif op_level < level:\n # if the operation is not child of current operation\n # go back to it's parent operation\n levels_back = level - op_level + 1\n for _ in range(levels_back):\n current = stack.pop()\n level -= levels_back\n else:\n raise Exception(\"corrupted plan\")\n return stack[0]",
"def _operation_traverse(self, op, op_f, aggregate_f, combine_f): # noqa\n # apply op_f for each operation\n op_res = op_f(op)\n if len(op.children) == 0:\n return op_res # no children return\n else:\n # apply _operation_traverse recursively\n children = [\n self._operation_traverse(child, op_f, aggregate_f, combine_f)\n for child in op.children\n ]\n # combine the operation result with the children aggregated result\n return combine_f(op_res, aggregate_f(children))",
"def convert_concat(g, op, block):\n\n inputs = [g.get_node(op.input(\"X\")[i]) for i in range(len(op.input(\"X\")))]\n axis = op.attr(\"axis\")\n inputs = _dtype_shape_promotion(inputs)\n out = _op.concatenate(inputs, axis=axis)\n g.add_node(op.output(\"Out\")[0], out)",
"def extract_operator_products(e, independent=False):\n ops = []\n\n if isinstance(e, Operator):\n ops.append(e)\n\n elif isinstance(e, Add):\n for arg in e.args:\n ops += extract_operator_products(arg, independent=independent)\n\n elif isinstance(e, Mul):\n c, o = split_coeff_operator(e)\n if o != 1:\n ops.append(o)\n else:\n if debug:\n print(\"Unrecongized type: %s: %s\" % (type(e), str(e)))\n\n no_ops = []\n for op in ops:\n no_op = normal_ordered_form(op.expand(), independent=independent)\n if isinstance(no_op, (Mul, Operator, Pow)):\n no_ops.append(no_op)\n elif isinstance(no_op, Add):\n for sub_no_op in extract_operator_products(no_op, independent=independent):\n no_ops.append(sub_no_op)\n else:\n raise ValueError(\"Unsupported type in loop over ops: %s: %s\" %\n (type(no_op), no_op))\n\n return list(set(no_ops))",
"def Composite(oper, **kw_kernels):\n oplib = {\n '+': dict(\n ufunc=np.add, # associated numpy ufunc\n jfunc=lambda F, f, j: j, # Jacobian evaluator\n jgen=lambda F_expr, j_expr, i: j_expr, # Jacobian code generator\n opname='Addtive',\n ),\n '*': dict(\n ufunc=np.multiply,\n jfunc=lambda F, f, j: F / f * j,\n jgen=lambda F_expr, j_expr, i: Template('(${X * })').render(\n X=F_expr[:i] + (j_expr,) + F_expr[i + 1:]\n ),\n opname='Product'\n ),\n }\n\n if oper not in oplib:\n raise ValueError(f'Invalid reduction operator {repr(oper)}.')\n\n @cpptype([(key, ker.dtype) for key, ker in kw_kernels.items()])\n class CompositeKernel(MicroKernel):\n @property\n def name(self):\n return 'Composite'\n\n @property\n def opname(self):\n return self._opname\n\n def __init__(self, opstr, ufunc, jfunc, jgen, opname, **kw_kernels):\n self.opstr = opstr\n self.ufunc = ufunc\n self.jfunc = jfunc\n self.jgen = jgen\n self._opname = opname\n self.kw_kernels = kw_kernels\n\n def __repr__(self):\n return Template('${cls}(${opstr}, ${kwexpr, })').render(\n cls=self.name,\n opstr=repr(self.opstr),\n kwexpr=[f'{k}={repr(K)}' for k, K in self.kw_kernels.items()])\n\n def __call__(self, X, Y, jac=False):\n if jac is True:\n F, J = list(\n zip(*[kernel(X[key], Y[key], True)\n for key, kernel in self.kw_kernels.items()])\n )\n S = self.ufunc.reduce(F)\n jacobian = np.array([\n self.jfunc(S, f, j) for i, f in enumerate(F) for j in J[i]\n ])\n return S, jacobian\n else:\n return self.ufunc.reduce([\n f(X[k], Y[k]) for k, f in self.kw_kernels.items()\n ])\n\n def gen_expr(self, x, y, theta_scope=''):\n F, J = list(\n zip(*[kernel.gen_expr('%s.%s' % (x, key),\n '%s.%s' % (y, key),\n '%s%s.' % (theta_scope, key))\n for key, kernel in self.kw_kernels.items()])\n )\n f = Template('(${F ${opstr} })').render(opstr=self.opstr, F=F)\n jacobian = [\n self.jgen(F, j, i) for i, _ in enumerate(F) for j in J[i]\n ]\n return f, jacobian\n\n @property\n def theta(self):\n return pretty_tuple(\n self.name,\n self.kw_kernels.keys()\n )(*[k.theta for k in self.kw_kernels.values()])\n\n @theta.setter\n def theta(self, seq):\n for kernel, value in zip(self.kw_kernels.values(), seq):\n kernel.theta = value\n\n @property\n def bounds(self):\n return pretty_tuple(\n self.name,\n self.kw_kernels.keys()\n )(*[k.bounds for k in self.kw_kernels.values()])\n\n @property\n def minmax(self):\n return self.ufunc.reduce(\n [k.minmax for k in self.kw_kernels.values()],\n axis=0\n )\n\n # for the .state property of cpptype\n for key in kw_kernels:\n setattr(CompositeKernel, key,\n property(lambda self, key=key: self.kw_kernels[key]))\n\n return CompositeKernel(oper, **oplib[oper], **kw_kernels)",
"def concat(*xforms):\n\n result = xforms[0]\n\n for i in range(1, len(xforms)):\n result = np.dot(result, xforms[i])\n\n return result",
"def concat(*xforms):\n\n result = xforms[0]\n\n for i in range(1, len(xforms)):\n result = np.dot(result, xforms[i])\n\n return result",
"def compose_children(self):\n for l_symbol, l_info in self.matrix[self.i][self.k].items():\n l_rhs = Nonterminal(l_symbol)\n for r_symbol, r_info in self.matrix[self.k][self.j].items():\n r_rhs = Nonterminal(r_symbol)\n\n # check the subtrees in [i][k] and [k][j] to see if you can make a valid rhs\n potential_rules = [p for p in self.grammar.productions(rhs=l_rhs) if p.rhs()[1] == r_rhs]\n for potential_rule in sorted(potential_rules, key=lambda x: x.prob()):\n new_lhs = potential_rule.lhs().symbol()\n new_tree = Tree(new_lhs, [l_info[1], r_info[1]])\n new_prob = log(potential_rule.prob()) + l_info[0] + r_info[0]\n if new_lhs not in self.matrix[self.i][self.j] or new_prob > self.matrix[self.i][self.j][new_lhs][0]:\n self.matrix[self.i][self.j][new_lhs] = (new_prob, new_tree)",
"def _flatten(self, op):\n if isinstance(self, op):\n for i, arg in enumerate(self._args):\n if isinstance(arg, self.DUAL):\n others = self._args[:i] + self._args[i+1:]\n expr = op.DUAL(*[op(a, *others) for a in arg.args])\n if isinstance(expr, OrAnd):\n return expr._flatten(op)\n else:\n return expr\n else:\n return self\n else:\n nested, others = list(), list()\n for arg in self._args:\n if arg.depth > 1:\n nested.append(arg)\n else:\n others.append(arg)\n args = [arg._flatten(op) for arg in nested] + others\n return op.DUAL(*args)",
"def build(cls, ops, signals):\n\n logger.debug(\"===================\")\n logger.debug(\"BUILD %s\", ops)\n\n if ops not in cls.op_builds:\n raise BuildError(\"Operators build has not been initialized \"\n \"(missed pre-build step)\")\n\n output = cls.op_builds[ops].build_step(signals)\n\n if isinstance(output, (tf.Tensor, tf.Variable)):\n output = [output]\n elif isinstance(output, tuple):\n output = list(output)\n\n return output",
"def cartesian_product(G, H):\n GH = _init_product_graph(G, H)\n GH.add_nodes_from(_node_product(G, H))\n GH.add_edges_from(_edges_cross_nodes(G, H))\n GH.add_edges_from(_nodes_cross_edges(G, H))\n return GH",
"def run_all(operations=ops):\n for operation in operations:\n run(operation)",
"def _combine(self, other, operation):\n if getattr(other, 'empty'):\n return self\n\n if self.empty:\n return other\n\n return QCombination(operation, [self, other])",
"def compute_operator(self, snapshots):\n\n # To avoid recursion function, use FIFO list to simulate the tree\n # structure\n data_queue = [snapshots.copy()]\n\n current_bin = 0\n while data_queue:\n Xraw = data_queue.pop(0)\n\n n_samples = Xraw.shape[1]\n\n step = max(1, int(np.floor(old_div(n_samples, self._nyq))))\n Xsub = Xraw[:, ::step]\n Xc = Xsub[:, :-1]\n Yc = Xsub[:, 1:]\n\n Xc, Yc = compute_tlsq(Xc, Yc, self._tlsq_rank)\n\n rho = old_div(float(self._max_cycles), n_samples)\n sub_operator = SubMrDMDOperator(svd_rank=self._svd_rank,\n eigs_divider=2. * np.pi * step, rho=rho)\n sub_operator.compute_operator(Xc, Yc)\n\n modes = sub_operator.modes\n eigs = sub_operator.eigenvalues\n Atilde = sub_operator.as_numpy_array\n b = sub_operator.compute_sub_amplitudes(Xc, self._opt)\n\n #---------------------------------------------------------------\n # DMD Amplitudes and Dynamics\n #---------------------------------------------------------------\n Vand = np.vander(np.power(eigs, old_div(1., step)), n_samples, True)\n\n Psi = (Vand.T * b).T\n\n self._modes.append(modes)\n self._b.append(b)\n self._Atilde.append(Atilde)\n self._eigenvalues.append(eigs)\n self._nsamples.append(n_samples)\n self._steps.append(step)\n\n if Xraw.dtype == 'float64':\n Xraw -= modes.dot(Psi).real\n else:\n Xraw -= modes.dot(Psi)\n\n if current_bin < 2**(self._max_level - 1) - 1:\n current_bin += 1\n half = int(np.ceil(old_div(Xraw.shape[1], 2)))\n data_queue.append(Xraw[:, :half])\n data_queue.append(Xraw[:, half:])\n else:\n current_bin += 1",
"def test_commutator_expansion():\n hs = LocalSpace(\"0\")\n A = OperatorSymbol('A', hs=hs)\n B = OperatorSymbol('B', hs=hs)\n C = OperatorSymbol('C', hs=hs)\n D = OperatorSymbol('D', hs=hs)\n alpha = symbols('alpha')\n assert Commutator(A + B, C).expand() == Commutator(A, C) + Commutator(B, C)\n assert Commutator(A, B + C).expand() == Commutator(A, B) + Commutator(A, C)\n assert Commutator(A + B, C + D).expand() == (\n Commutator(A, C)\n + Commutator(A, D)\n + Commutator(B, C)\n + Commutator(B, D)\n )\n assert Commutator(A + B, C + D + alpha).expand() == (\n Commutator(A, C)\n + Commutator(A, D)\n + Commutator(B, C)\n + Commutator(B, D)\n )",
"def chain(ops: Sequence[Task]) -> List[Relation]:\n return [Relation(from_task_id=a.task_id, to_task_id=b.task_id) for a, b in zip(ops, ops[1::])]",
"def process_children(cls, operation):\n db = cls._core.get_db()\n\n stmnt = \"SELECT OPE_ID, OPE_TYPE FROM OPERATIONS WHERE OPE_OPE_PARENT = ? ORDER BY OPE_INVOKED ;\"\n stmnt_lock = \"UPDATE OPERATIONS SET OPE_STATUS = 1 WHERE OPE_ID = ? ;\"\n cur = db.query(cls._core,stmnt,(operation.get_id(),))\n for row in cur.fetchallmap():\n child_operation = cls.restore_operation(row)\n db.query(cls._core,stmnt_lock,(child_operation.get_id(),),commit=True)\n try:\n cls.process_children(child_operation)\n child_operation.do_workload()\n except Exception,e:\n stmnt_err = \"UPDATE OPERATIONS SET OPE_STATUS = 2 WHERE OPE_ID = ? ;\"\n db.query(cls._core,stmnt_err,(int(row[\"OPE_ID\"]),),commit=True)\n #TODO GENERATE ERROR IN LOG\n raise e\n stmnt_delete = \"DELETE FROM OPERATIONS WHERE OPE_ID = ?;\"\n db.query(cls._core,stmnt_delete,(child_operation.get_id(),),commit=True)",
"def commute_operands(self, node):\n def is_assumption(n):\n \"\"\"Return whether a node is an assumption.\"\"\"\n if not isinstance(n, types.Symbol):\n return False\n symbol = self.symbol_table.lookup(n.name)\n if symbol and symbol.type_ == SymbolType.StackItem:\n return True\n return False\n\n def has_assumption(n):\n \"\"\"Return whether a BinOpCode contains an assumption.\"\"\"\n if not isinstance(n, types.BinOpCode):\n return False\n return any(is_assumption(i) for i in [n.left, n.right])\n\n def should_commute(n):\n return is_assumption(n) or has_assumption(n)\n\n # Commute operands of different operations.\n # e.g. 2 + assumption + 3 --> 2 + 3 + assumption\n if self.is_commutative(node) and has_assumption(node.left) and node.left.name == node.name:\n # Move the assumption so we can be sure it's in the attribute 'right'.\n if is_assumption(node.left.left):\n node.left.left, node.left.right = node.left.right, node.left.left\n\n self.debug('Commuting operations for %s and %s' % (format_structural_op(node.left), format_structural_op(node.right)), node.lineno)\n right = node.right\n node.right = node.left.right\n node.left.right = right\n\n if should_commute(node.left) or not should_commute(node.right):\n return\n\n if self.is_commutative(node):\n self.debug('Commuting operands for %s' % format_structural_op(node), node.lineno)\n node.left, node.right = node.right, node.left\n elif self.has_logical_equivalent(node):\n logmsg = 'Replacing %s with logical equivalent ' % format_structural_op(node)\n node.name = logical_equivalents[node.name]\n node.left, node.right = node.right, node.left\n logmsg += format_structural_op(node)\n self.debug(logmsg, node.lineno)",
"def traverse_postorder(operation):\n\n nodes_postorder = []\n def recurse(node):\n if isinstance(node, Operation):\n for input_node in node.input_nodes:\n recurse(input_node)\n nodes_postorder.append(node)\n\n recurse(operation)\n return nodes_postorder",
"def generate_operations(self):\n combinations = self.COMBINATIONS.items()[:self.limit]\n for (term1, term2), type in combinations:\n yield (term1, term2, type)",
"def logical_reduce(op_list, op_ctor=LogicalOr, precision=ML_Bool, **kw):\n local_list = [node for node in op_list]\n while len(local_list) > 1:\n op0 = local_list.pop(0)\n op1 = local_list.pop(0)\n local_list.append(\n op_ctor(op0, op1, precision=precision)\n )\n # assigning attributes to the resulting node\n result = local_list[0]\n result.set_attributes(**kw)\n return result",
"def traverse_postorder(operation):\n\n nodes_postorder = []\n def recurse(node):\n if isinstance(node, Operation):\n for input_node in node.input_nodes:\n recurse(input_node)\n nodes_postorder.append(node)\n\n recurse(operation)\n return nodes_postorder",
"def cross(A, B):\n return [a+b for a in A for b in B]",
"def cartesianProduct(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(lhs, Set)\n assertType(rhs, Set)\n return Set([t for t in itertools.product(lhs, rhs)])",
"def test_concat_get_op_product_graph(self):\n\n tf.compat.v1.reset_default_graph()\n\n _ = concat_model()\n conn_graph = ConnectedGraph(tf.compat.v1.get_default_graph(), ['input_1'], ['concat_model/Softmax'])\n self.assertTrue(validate_branch_ops(conn_graph))\n self.assertTrue(validate_product_tensor_lists(conn_graph))\n self.assertEqual(2, conn_graph.branch_count)\n self.assertEqual(13, len(conn_graph.get_all_ops()))\n self.assertEqual(12 + len(tf.compat.v1.get_default_graph().get_collection('variables')),\n len(conn_graph.get_all_products()))\n\n # Check that the order of input products to the concat op matches the order of input tensors in the tf graph\n concat_tf_op = tf.compat.v1.get_default_graph().get_operation_by_name(\"concatenate/concat\")\n concat_op = conn_graph.get_all_ops()['concatenate/concat']\n for index, product in enumerate(concat_op.get_input_products()):\n self.assertTrue(len(product.consumers) == 1)\n self.assertEqual(product.tensor_dict[product.consumers[0]], concat_tf_op.inputs[index])"
] | [
"0.5873993",
"0.5623249",
"0.5586423",
"0.55603653",
"0.53981346",
"0.5373104",
"0.5341948",
"0.533148",
"0.5324388",
"0.5261074",
"0.5190329",
"0.5190329",
"0.51866263",
"0.5185752",
"0.51829857",
"0.517488",
"0.51386654",
"0.5113341",
"0.51113427",
"0.50928247",
"0.50926036",
"0.5075281",
"0.50709325",
"0.50672024",
"0.5062822",
"0.5050295",
"0.5047414",
"0.50362426",
"0.5031713",
"0.5031389"
] | 0.605311 | 0 |
Test getting the agent name. | def test_get_agent_name(self):
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", "agent.agent_name"],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
assert result.output == "Agent0\n" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def server_agent_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_agent_name\")",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())",
"def job_agent_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"job_agent_name\")",
"def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])",
"def get_test_name(request):\n return request.node.name",
"def get_name():",
"def GetModernizedTestName(self, arg):\n return arg",
"def get_name() -> str:\n pass",
"def get_name() -> str:",
"def botname(self):\n return settings.AIM_USERNAME",
"def testbed_name(self): \n return \"C-Lab\"",
"def getAgentID(self):\n\t\treturn self.agentID",
"def test_badge_should_have_name(self):\n\n badge = self.get_sample_badge()\n self.assertIsInstance(badge.name, str)",
"def agent(self):\n return self.__agent",
"def get_name():\n return \"Boss\"",
"def test_get_name_of_variable(self):\n name = Code()\n self.assertEqual(str(name), 'name')",
"def test_name(self):\n computer1 = computer.Computer(1)\n res = computer1.name\n exp = \"CPU\"\n self.assertEqual(res, exp)",
"def test_local_agent_from_source_long_name(self, _):\n agent_name = 'agent-' + ''.join(uuid.uuid4().hex for i in range(4))\n agent_queue = '{0}-queue'.format(agent_name)\n\n inputs = {\n 'source_url': self.source_url,\n 'requirements_file': self.requirements_file,\n 'name': agent_name,\n 'queue': agent_queue\n }\n\n blueprint_path = resources.get_resource(\n 'blueprints/agent-from-source/local-agent-blueprint.yaml')\n self.logger.info('Initiating local env')\n env = local.init_env(name=self._testMethodName,\n blueprint_path=blueprint_path,\n inputs=inputs)\n\n env.execute('install', task_retries=0)\n self.assert_daemon_alive(name=agent_name)\n\n env.execute('uninstall', task_retries=1)\n self.wait_for_daemon_dead(name=agent_name)",
"def test_get_application_name():\n\n assert application_services.get_application_name() == 'tests.unit'",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def getName():",
"def test_get_component_name(initialized_bmi):\n name = initialized_bmi.get_component_name()\n assert isinstance(name, str)\n\n return name"
] | [
"0.6913163",
"0.69033694",
"0.6782439",
"0.6780959",
"0.6780959",
"0.6644797",
"0.6546011",
"0.63555205",
"0.6076936",
"0.6031",
"0.59842354",
"0.59433043",
"0.5923481",
"0.58773845",
"0.5849658",
"0.58205575",
"0.5818908",
"0.5801076",
"0.5792172",
"0.578954",
"0.575222",
"0.57455957",
"0.5730772",
"0.5720691",
"0.5720691",
"0.5720691",
"0.5720691",
"0.5720691",
"0.5720691",
"0.56884176"
] | 0.8780365 | 0 |
Test getting the 'dummy' skill name. | def test_get_skill_name(self):
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", "skills.dummy.name"],
standalone_mode=False,
)
assert result.exit_code == 0
assert result.output == "dummy\n" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def fixture_microbial_sample_name():\n return \"microbial_name_test\"",
"def test_get_github_name_negative(self):\n self.assertIsNone(app.get_github_name(\"undefined_user12345\")[\"user\"])",
"def test_badge_should_have_name(self):\n\n badge = self.get_sample_badge()\n self.assertIsInstance(badge.name, str)",
"def test_get_github_name_positive(self):\n self.assertIsNotNone(app.get_github_name(\"dhh\")[\"user\"])",
"def test_bad_name(self):\n\n request = service.get_request('GET', {u'taxon': u'Nosuchtaxonia'})\n x = self.start_request_tests(request)\n m = x.json().get(u'message')\n self.assertTrue(x.status_code >= 200)\n self.assertTrue('No Taxon matched\" in \"%s\"' % m)",
"def test_workon_name(self):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5, name=\"voici\"\n )\n\n assert experiment.name == \"voici\"",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def test_ask_yesno_no(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'nope'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'no')",
"def test_name_returner(self):\n test = self.data.name_returner()\n self.assertIn(('Trevor', 'Harvey'), test)\n self.assertIn(('Nik', 'Silver'), test)",
"def test_ask_yesno_yes(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'yes'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_ask_yesno_other(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'I am a fish'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'I am a fish')",
"def testGetName(self):\n\tself.assertEqual(self.emp.getName(),'Lin') # test getName() whether return correct answer\"\n\tself.assertNotEqual(self.emp2.getName(),'Lin')",
"def test_with_only_names(self, do_student_launch, student_payload):\n del student_payload[\"email\"]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)",
"def test_2():\n\tname = \"Luke Skywalker\"\n\tassert name.lower() == api_call().json()['name'].lower()",
"def test_name(self):\n dtt = self.TDTT(when=self.txt_when)\n expected_name = self.txt_when\n self.assertEquals(expected_name, dtt.name)\n self.assertEquals(expected_name, '{}'.format(dtt))\n expected_logged = '{}({})'.format(dtt.typename(), self.txt_when)\n self.assertEquals(expected_logged, dtt.logged)",
"def name_test(item):\n return f\"{item['params']['interface']}:{item['expected']['state']}\"",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def test_names():\n first = get_name(\"As\")\n assert first == \"Arsenic\"\n\n second = get_name(\"Be\")\n assert second == \"Beryllium\"\n\n third = get_name(\"Li\")\n assert third == \"Lithium\"",
"def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")",
"def test_first_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__first_name=unromanized,\n profile__romanized_first_name=romanized,\n )\n assert CDDWriter.first_name(profile) == expected",
"def test_get_tag_name(self):\r\n name = self.combinedoe.get_tag_name(\"<t>Tag</t>\")\r\n self.assertEqual(name, \"t\")",
"def test_skills(\n self, mock_get_ai_details, mock_get_ai, mock_get_categories\n ):\n\n mock_get_ai.return_value = self.ai\n mock_get_ai_details.return_value = self.ai_details\n\n mock_get_ai_details.return_value['skills'] = [\n {'name': 'bot 1'},\n {'name': 'bot 2'},\n {'name': 'bot 3'},\n {'name': 'bot 4'},\n {'name': 'bot 5'},\n {'name': 'bot 6'},\n ]\n\n response = self.client.get(reverse(\n 'studio:edit_bot',\n kwargs={'aiid': self.ai['aiid']}\n ))\n\n self.assertContains(response, 'bot 1')\n self.assertContains(response, 'bot 2')\n self.assertContains(response, 'bot 3')\n self.assertContains(response, 'bot 4')\n self.assertContains(response, 'bot 5')\n self.assertNotContains(response, 'bot 6')\n self.assertNotContains(response, 'Speed up your bot building process by '\n 'starting with one of our Templates from the store.')",
"def test_ask_yesno_german(self):\n skill = create_skill(lang='de-de')\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'ja'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_name_empty_string(self):\r\n self.name = \"\"",
"def test_get_by_name1(self):\n pass",
"def test_get_player_names(self):\n INPUT.side_effect = ['A', 'M', 'Z', '']\n names = game.pig.get_player_names()\n self.assertEqual(names, ['A', 'M', 'Z'])",
"def test_get_tool_by_name(tmp_path, caplog, base_db):\r\n caplog.set_level(logging.DEBUG)\r\n tool = base_db.get_single_tool(FAKE_TOOL_INFO.get(\"name\"))\r\n assert tool.name == FAKE_TOOL_INFO.get(\"name\")\r\n tool = base_db.get_single_tool(\"non-existing\")\r\n assert not tool",
"def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")"
] | [
"0.73746306",
"0.6601982",
"0.6425845",
"0.62760645",
"0.6175571",
"0.6128249",
"0.61196023",
"0.6061607",
"0.60439354",
"0.60229063",
"0.5997637",
"0.5944493",
"0.59063286",
"0.58992296",
"0.5851394",
"0.58460176",
"0.58350754",
"0.5834596",
"0.58218324",
"0.5789334",
"0.5785506",
"0.57852894",
"0.5779153",
"0.5772882",
"0.57647854",
"0.57553804",
"0.57519287",
"0.57473546",
"0.5745198",
"0.5730602"
] | 0.864754 | 0 |
Test that the 'get' fails because the path is too short but the root is correct. | def test_too_short_path_but_root_correct(self):
result = self.runner.invoke(
cli, [*CLI_LOG_OPTION, "config", "get", "agent"], standalone_mode=False
)
assert result.exit_code == 1
assert (
result.exception.message
== "The path is too short. Please specify a path up to an attribute name."
)
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", "skills.dummy"],
standalone_mode=False,
)
assert result.exit_code == 1
assert (
result.exception.message
== "The path is too short. Please specify a path up to an attribute name."
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_root_get(self):\n pass",
"def test_root_get(self):\n pass",
"def testInvalidPath(self):\n status, _ = self._http_get(\"invalid_path\")\n self.assertEqual(status, 404)",
"def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent\", \"data\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy\", \"value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )",
"def test_invalid_path_get(self):\n static_path = self.finder.find('file.ext')\n self.assertIsNone(static_path)",
"def verify_root_path(self) -> None:\n path = \"/\"\n with self.assertRaises(AccessDeniedException):\n verify_file_path(path)",
"def test_bad_paths(self):\n self.do_test_bad_path('frog', '/frog') # no permission to write",
"def test_get_secrets_invalid_path(self, mget):\n data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n mget.return_value = self._mock_response(status=401, content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secrets('this/path/does/not/exist')",
"def test_get_secret_invalid_path(self, mget):\n data = json.dumps({\"data\": {}})\n mget.return_value = self._mock_response(content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secret('this/path/does/not/exist', 'null')",
"def test_root(self):\n self.skipTest(\"\")\n response = self.fetch('/')\n self.assertEqual(response.code, 404)",
"def test_get_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.get(-1)",
"def test_safeGet(self):\n self.assertIs(\n BMConfigParser().safeGet('nonexistent', 'nonexistent'), None)\n self.assertEqual(\n BMConfigParser().safeGet('nonexistent', 'nonexistent', 42), 42)",
"def test_get_unhappy_paths():\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\")\n\n with pytest.raises(TypeError):\n ContractHandler.get(\"foo name\", \"foo address\")\n\n with pytest.raises(InvalidAddress):\n ContractHandler.get(\"DataTokenTemplate\", \"foo address\")",
"def test_verify_path_7(self):\n result = basic.verify_path(str(self.test_directory1), \"invalid\")\n self.assertFalse(result)",
"def test_get_absolute_path():\n eq_(get_absolute_path(\"http://foo.com/bar/baz\", \"../foo\"), \"/bar/foo\")\n eq_(get_absolute_path(\"http://foo.com/bar/baz\", \"/foo\"), \"/foo\")",
"def validate_short_path(short_path):",
"def verify_restricted_path(self) -> None:\n path = \"/usr\"\n with self.assertRaises(NotFoundException):\n verify_file_path(path)",
"def testNonExistentRootPath(self):\n\n file_defs = [\n {'name': 'file_1_byte.txt', 'path': '', 'size': 1, 'mod_inc': 1},\n\n # Empty directories\n {'name': 'empty_dir1', 'path': '', 'size': -1},\n {'name': 'empty_dir2', 'path': 'empty_dir1', 'size': -1},\n {'name': 'empty_dir3', 'path': 'empty_dir1/empty_dir2', 'size': -1},\n ]\n\n # All new files\n self._setup_test_store(file_defs)\n self._sync_drives()\n\n drive = self.drive_class(self.account_id, self.config_file_dir, self.config_pw)\n\n with self.assertRaises(ValueError):\n for res in drive.get_root_file_tree('empty_dir1/empty_dir45'):\n pass",
"def test_get_not_exist(self):\n attempt_id = 9999\n _, err = self.resource.get(attempt_id)\n self.assertEqual(404, err)",
"def test_two_legged_get(self):\n resp, content = self._two_legged(\"GET\")\n self.assertEqual(int(resp['status']), 200)",
"def test_set_get_incorrect_path(self):\n with pytest.raises(\n ClickException, match=\"Attribute `.*` for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", self.INCORRECT_PATH],\n standalone_mode=False,\n catch_exceptions=False,\n )\n\n with pytest.raises(\n ClickException,\n match=\"Attribute `behaviours.dummy.args.behaviour_arg_100500` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n self.INCORRECT_PATH,\n str(self.NEW_VALUE),\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_geturl_purpose(self):\n self.fs.create('foo')\n with self.assertRaises(errors.NoURL):\n self.fs.geturl('foo', '__nosuchpurpose__')",
"def test_client_id_path() -> None:\n assert indieauth._parse_client_id(\"http://ex.com\").path == \"/\"\n assert indieauth._parse_client_id(\"http://ex.com/hello\").path == \"/hello\"\n assert (\n indieauth._parse_client_id(\"http://ex.com/hello/.world\").path == \"/hello/.world\"\n )\n assert (\n indieauth._parse_client_id(\"http://ex.com/hello./.world\").path\n == \"/hello./.world\"\n )\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/.\")\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/hello/./yo\")\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/hello/../yo\")",
"def test_invalid_path(self):\n self.assertRaises(argparse.ArgumentTypeError, generic.check_path, 'foo')",
"def test_no_recognized_root(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"wrong_root.agent_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The root of the dotted path must be one of: {}\".format(\n ALLOWED_PATH_ROOTS\n )\n )",
"def _is_bad_path(path, base):\r\n return not resolved(joinpath(base, path)).startswith(base)",
"def test_error_html_using_get(self):\n pass",
"def test_nonexistent_path(tmpdir_factory):\n folder = Path(tmpdir_factory.mktemp('git'))\n path = folder.joinpath('nonexistent')\n\n with pytest.raises(ValueError):\n gitb.pull(path)",
"def test_append_slash_slashless_unknown(self):\n request = self.rf.get(\"/unknown\")\n response = CommonMiddleware(get_response_404)(request)\n self.assertEqual(response.status_code, 404)",
"def test_invalid_pathname(self):\n self.assertFalse(Util.is_pathname_valid(''))"
] | [
"0.6826721",
"0.6826721",
"0.6796174",
"0.6690637",
"0.6464489",
"0.63691777",
"0.6339634",
"0.631256",
"0.6309888",
"0.6286524",
"0.6273844",
"0.6235973",
"0.62328714",
"0.61507434",
"0.6129715",
"0.61153513",
"0.6104261",
"0.6071585",
"0.60438406",
"0.60402846",
"0.60037696",
"0.60023564",
"0.5958214",
"0.5913527",
"0.5907943",
"0.59011805",
"0.5889473",
"0.58736956",
"0.5872585",
"0.5870241"
] | 0.68506765 | 0 |
Test that getting a nested object in 'dummy' skill fails because path is not valid. | def test_get_fails_when_getting_nested_object(self):
with pytest.raises(
ClickException, match=r"Attribute `.* for .* config does not exist"
):
self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"get",
"skills.dummy.non_existing_attribute.dummy",
],
standalone_mode=False,
catch_exceptions=False,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_access_nested_map(self, nested_map, path, result):\n self.assertEqual(access_nested_map(nested_map, path), result)",
"def test_nested_objf(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"bc\":2}}')\n self.assertFalse(check_json_object(jdic, jobj))",
"def test_access_nested_map_exception(self, nested_map, path):\n with self.assertRaises(KeyError) as error:\n access_nested_map(nested_map, path)\n self.assertEqual(error.exception.args[0], path[-1])",
"def test_nested_obj(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"b\":2}}')\n self.assertTrue(check_json_object(jdic, jobj))",
"def test_get_nested_attribute(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.behaviours.dummy.class_name\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"DummyBehaviour\\n\"",
"def test_get_powersupply_parent_exception(self):\n session = self.login_to_apic()\n node = Pod('1')\n self.assertRaises(TypeError, Powersupply.get, session, node)",
"def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli, [*CLI_LOG_OPTION, \"config\", \"get\", \"agent\"], standalone_mode=False\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )",
"def test_pod_invalid_parent(self):\n session = self.login_to_apic()\n parent = Node('1','101','Switch')\n self.assertRaises(TypeError, Pod.get, session, parent)",
"def _get_object(self, path):\n if path == \"/\":\n return self.target\n\n parts = path[1:].split(\"/\")\n last = self.target\n for part in parts:\n if type(last) == dict:\n last = last[part]\n else:\n last = getattr(last, \"get_\" + part)()\n return last",
"def test_load_path(parser):\n doc = parser.load(pathlib.Path('jsonexamples') / 'small' / 'demo.json')\n doc.at_pointer('/Image/Width')",
"def test_add_path(self):\n path = 'C:\\\\test\\\\'\n info = self.api.add_path(path, tags=['asd'])\n self.assertEqual(info['value'], path)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def nested_get(\n d: t.Dict, *path: t.Tuple[str, str], raise_on_missing: bool = True\n) -> t.Optional[t.Any]:\n for name, key in path:\n d = d.get(key) # type: ignore\n if d is None:\n if raise_on_missing:\n name = \"table\" if name == \"this\" else name\n raise ValueError(f\"Unknown {name}: {key}\")\n return None\n\n return d",
"def test_utils_get_dict_value_from_path_should_return_none_when_value_does_not_exists(\n path,\n):\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n assert ralph_utils.get_dict_value_from_path(dictionary, path) is None",
"def _getattr_path(obj: Any, path: str) -> Any:\n if not path:\n return None\n\n for attr in path.split('.'):\n obj = getattr(obj, attr, None)\n return obj",
"def test_get_study_missing(self):\n self.assertIsNone(self.storage.get_study('missing'))",
"def test_descriptor_with_nopath(self):\r\n\r\n self._get_descriptor_with_invalid_link(NoPathToItem)",
"def test_no_path():\n test = [{'key': 'val'}, []]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'path list empty' in str(t_result.failure())",
"def test_nested(cls, value, res):\n\tobj = cls(value, DEFAULT_POD)\n\tassert obj == res",
"def access_path(data: dict or any, path: list[str]) -> any:\n if path:\n first = path[0]\n rest = path[1:]\n return access_path(data[first], rest)\n return data",
"def test_dotwiz_plus_get_item():\n dd = DotWizPlus()\n dd.a = [{'one': 1, 'two': {'key': 'value'}}]\n\n item = dd['a'][0]\n assert isinstance(item, DotWizPlus)\n assert item['one'] == 1\n\n assert item['two']['key'] == 'value'",
"def test_search_key() -> None:\n # assert that having a wrong key at root level\n # in the json will raise an error\n key = \"toto\"\n d = {\"toto\": {\"a\": \"b\"}, \"c\": \"d\"}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n # Search when the key is in a deeper nested level\n key = \"nested_key\"\n d = {\"en\": {\"level1\": {\"level2\": {\"nested_key\": \"value\"}}}}\n\n with pytest.raises(Exception):\n Translator.search_key(d, key)\n\n return",
"def test_nested_dict(self):\n nested = self.TEI.nested_dict(exclude=[\"tei:note\"])\n self.assertEqual(nested[\"1\"][\"pr\"][\"1\"], \"Spero me secutum in libellis meis tale temperamen-\",\n \"Check that dictionary path is well done\")\n self.assertEqual(nested[\"1\"][\"12\"][\"1\"], \"Itur ad Herculeas gelidi qua Tiburis arces \",\n \"Check that dictionary path works on more than one passage\")\n self.assertEqual(nested[\"2\"][\"pr\"][\"1\"], \"'Quid nobis' inquis 'cum epistula? parum enim tibi \",\n \"Check that different fist level works as well\")\n self.assertEqual(nested[\"1\"][\"3\"][\"8\"], \"Ibis ab excusso missus in astra sago. \",\n \"Check that notes are removed \")\n self.assertEqual(\n [list(nested.keys()), list(nested[\"1\"].keys())[:3], list(nested[\"2\"][\"pr\"].keys())[:3]],\n [[\"1\", \"2\"], [\"pr\", \"1\", \"2\"], [\"sa\", \"1\", \"2\"]],\n \"Ensure that text keeps its order\")",
"def test_embedded_json(self):\n json_data = '{\"a\": {\"b\" : true } }'\n json_flattened = json_flatten(json_data)\n self.assertEqual(json.loads(json_flattened), json.loads('{\"a.b\" : true}'))",
"def testIsInterestingPath(self):\n # pylint: disable=protected-access\n self.assertTrue(self.turbinia_processor._isInterestingPath(TEST_TASK_PATH))",
"def test_get_type_for_key_path_depth_one_level(test_schema):\n assert (\n get_type_for_key_path(test_schema, \"EmploymentInformation.OriginalHireDate\")\n == \"string\"\n )",
"def _get_from_nest(nest, path):\n if not path or not nest:\n return nest\n return _get_from_nest(nest.get(path[0], None), path[1:])",
"def test_root_get(self):\n pass",
"def test_root_get(self):\n pass",
"def test_get_chain_by_id(self):\n pass"
] | [
"0.608936",
"0.6087938",
"0.60613996",
"0.59348756",
"0.59342015",
"0.59085965",
"0.5834793",
"0.58287233",
"0.5761605",
"0.57341295",
"0.5677793",
"0.56325066",
"0.5624134",
"0.558056",
"0.55728745",
"0.5569612",
"0.55691016",
"0.5559063",
"0.5552797",
"0.5551889",
"0.5543451",
"0.553241",
"0.55232847",
"0.5497863",
"0.5474393",
"0.5466955",
"0.54574656",
"0.5449676",
"0.5449676",
"0.54269326"
] | 0.6654142 | 0 |
Test that getting a vendor component with wrong component type raises error. | def test_get_fails_when_getting_vendor_dependency_with_wrong_component_type(self):
result = self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"get",
"vendor.fetchai.component_type_not_correct.error.non_existing_attribute",
],
standalone_mode=False,
)
assert result.exit_code == 1
s = "'component_type_not_correct' is not a valid component type. Please use one of ['protocols', 'connections', 'skills', 'contracts']."
assert result.exception.message == s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_component_with_invalid_name():\n\n with pytest.raises(ComponentAttributeError):\n application_services.get_component('missing_component')",
"def test_register_component_with_invalid_type():\n\n with pytest.raises(InvalidComponentTypeError):\n component = CoreObject()\n application_services.register_component(component)",
"def test_remove_component_invalid():\n\n with pytest.raises(ComponentAttributeError):\n application_services.get_component('missing_component_to_remove')",
"def test_register_component_with_invalid_type_only_component():\n\n with pytest.raises(InvalidComponentTypeError):\n component = OnlyComponentMock('only_component')\n application_services.register_component(component)",
"def test_register_component_with_invalid_name():\n\n with pytest.raises(InvalidComponentNameError):\n component = ComponentWithInvalidNameMock('')\n application_services.register_component(component)",
"def test_component_loading_module_not_found_error_framework_package_with_wrong_type(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\n \"No module named 'packages.some_author.some_type'\"\n ),\n ):\n with pytest.raises(\n AEAPackageLoadingError,\n match=r\"No module named packages.some_author.some_type; 'some_type' is not a valid type name, choose one of \\['protocols', 'connections', 'skills', 'contracts'\\]\",\n ):\n load_component_from_config(component_configuration)",
"def test_get_component_with_invalid_custom_key():\n\n component = ComponentWithInvalidCustomKeyMock('component_with_invalid_key')\n custom_component = DuplicateComponentWithInvalidCustomKeyMock('component_with_invalid_key',\n component_custom_key=3000)\n application_services.register_component(component)\n application_services.register_component(custom_component)\n assert application_services.get_component('component_with_invalid_key',\n component_custom_key=999) == component\n\n application_services.remove_component(component.get_id())\n application_services.remove_component(custom_component.get_id())",
"def test_register_component_with_invalid_type_only_manager():\n\n with pytest.raises(InvalidComponentTypeError):\n component = OnlyManagerMock()\n application_services.register_component(component)",
"def test_component_loading_component_exception(component_configuration):\n\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=AEAComponentLoadException(\"Generic exception\"),\n ):\n with pytest.raises(\n AEAPackageLoadingError,\n match=\"Package loading error: An error occurred while loading protocol an_author/a_protocol:0.1.0: Generic exception\",\n ):\n load_component_from_config(component_configuration)",
"def test_component_without_owner_is_trac_error(self):\n # We create an instance of the panel so we can check existing values\n panel = ComponentAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_component_list(), self.default['component'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'component',\n ','.join(self.new['component']))\n\n # we purposely forget to add component_owner to config\n # and run the plugin expecting a TracError\n admin_command = TicketFieldConfigCommand(self.env)\n self.assertRaises(TracError,admin_command.set_fields_from_config)",
"def test_remove_component():\n\n component = application_services.get_component('database.component')\n application_services.remove_component(component.get_id())\n\n with pytest.raises(ComponentAttributeError):\n application_services.get_component('database.component')\n\n application_services.register_component(component)",
"def test_component_loading_module_not_found_error_non_framework_package(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\"No module named 'generic.package'\"),\n ):\n with pytest.raises(ModuleNotFoundError):\n load_component_from_config(component_configuration)",
"def test_invalid_device_type():\n _aws_device(wires=2, device_type=\"foo\", shots=None)",
"def test_upgrade_non_vendor(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:100.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_component_loading_module_not_found_error_framework_package_with_wrong_author(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\"No module named 'packages.some_author'\"),\n ):\n with pytest.raises(\n AEAPackageLoadingError,\n match=\"No module named packages.some_author; No AEA package found with author name 'some_author'\",\n ):\n load_component_from_config(component_configuration)",
"def test_component_loading_generic_module_not_found_error(component_configuration):\n\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\n \"Package loading error: An error occurred while loading .*: Generic error\"\n ),\n ):\n with pytest.raises(ModuleNotFoundError, match=\"Generic error\"):\n load_component_from_config(component_configuration)",
"def test_component_loading_generic_exception(component_configuration):\n\n with mock.patch.object(\n Protocol, \"from_config\", side_effect=Exception(\"Generic exception\")\n ):\n with pytest.raises(\n Exception, match=\"Package loading error: An error occurred while loading\"\n ):\n load_component_from_config(component_configuration)",
"def test_component_loading_module_not_found_error_framework_package_with_wrong_name(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\n \"No module named 'packages.some_author.protocols.some_name'\"\n ),\n ):\n with pytest.raises(\n AEAPackageLoadingError,\n match=\"No module named packages.some_author.protocols.some_name; No AEA package found with author name 'some_author', type 'protocols', name 'some_name'\",\n ):\n load_component_from_config(component_configuration)",
"def test_component_loading_module_not_found_error_framework_package(\n component_configuration,\n):\n with mock.patch.object(\n Protocol,\n \"from_config\",\n side_effect=ModuleNotFoundError(\"No module named 'packages'\"),\n ):\n with pytest.raises(ModuleNotFoundError, match=\"No module named 'packages'\"):\n load_component_from_config(component_configuration)",
"def test_invalid_odata_version():\n\n with pytest.raises(PyODataException) as e_info:\n pyodata.Client(SERVICE_URL, requests, 'INVALID VERSION')\n\n assert str(e_info.value).startswith('No implementation for selected odata version')",
"def test_type_check(ExampleComponentClass):\n\n instance = ExampleComponentClass()\n\n configure(instance, {\"a\": 4.5}, name=\"x\")\n\n # Attempting to access the field should now raise a type error.\n with pytest.raises(\n TypeError,\n match=\"Field 'a' of component 'x' is annotated with type '<class 'int'>', which is not satisfied by value 4.5.\",\n ):\n instance.a",
"def test_get_software(self):\n pass",
"def test_not_found(self):\n self.library.get.when.called_with('dummy!!!')\\\n .should.throw(ViolationDoesNotExists)",
"def test_get_device_unknown():\n device = get_device(SERIAL, CREDENTIAL, \"unknown\")\n assert device is None",
"async def test_device_unknown_error(hass):\n with patch.object(axis.device, \"get_device\", side_effect=Exception):\n await setup_axis_integration(hass)\n assert hass.data[AXIS_DOMAIN] == {}",
"def test_component_remove_error_bad_component(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('component remove bad_component')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)",
"async def test_select_errors(hass: HomeAssistant, vehicle_type: str):\n\n entity_registry = mock_registry(hass)\n device_registry = mock_device_registry(hass)\n\n invalid_upstream_exception = exceptions.InvalidUpstreamException(\n \"err.tech.500\",\n \"Invalid response from the upstream server (The request sent to the GDC is erroneous) ; 502 Bad Gateway\",\n )\n\n with patch(\"homeassistant.components.renault.PLATFORMS\", [SELECT_DOMAIN]):\n await setup_renault_integration_vehicle_with_side_effect(\n hass, vehicle_type, invalid_upstream_exception\n )\n await hass.async_block_till_done()\n\n mock_vehicle = MOCK_VEHICLES[vehicle_type]\n check_device_registry(device_registry, mock_vehicle[\"expected_device\"])\n\n expected_entities = mock_vehicle[SELECT_DOMAIN]\n assert len(entity_registry.entities) == len(expected_entities)\n for expected_entity in expected_entities:\n entity_id = expected_entity[\"entity_id\"]\n registry_entry = entity_registry.entities.get(entity_id)\n assert registry_entry is not None\n assert registry_entry.unique_id == expected_entity[\"unique_id\"]\n state = hass.states.get(entity_id)\n assert state.state == STATE_UNAVAILABLE\n for attr in FIXED_ATTRIBUTES:\n assert state.attributes.get(attr) == expected_entity.get(attr)\n # Check dynamic attributes:\n assert state.attributes.get(ATTR_ICON) == get_no_data_icon(expected_entity)\n assert ATTR_LAST_UPDATE not in state.attributes",
"def test_unknown_service(self):\n raise NotImplementedError # FIXME",
"def test_register_component_duplicate():\n\n component = DuplicateComponentMock('component_duplicate')\n application_services.register_component(component)\n\n with pytest.raises(DuplicateComponentIDError):\n application_services.register_component(component)\n\n application_services.remove_component(component.get_id())",
"def test_badComponentName(self):\n nPins = 12\n fuelDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"od\": 0.9, \"id\": 0.0, \"mult\": nPins}\n cladDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"od\": 1.1, \"id\": 1.0, \"mult\": nPins}\n fuel = Circle(\"fuel\", \"UZr\", **fuelDims)\n clad = Circle(\"clad_4.2.3\", \"HT9\", **cladDims)\n gapDims = {\n \"Tinput\": 25.0,\n \"Thot\": 430.0,\n \"od\": \"clad_4.2.3.id\",\n \"id\": \"fuel.od\",\n \"mult\": nPins,\n }\n gapDims[\"components\"] = {\"clad_4.2.3\": clad, \"fuel\": fuel}\n with self.assertRaises(ValueError):\n _gap = Circle(\"gap\", \"Void\", **gapDims)"
] | [
"0.7484369",
"0.70648474",
"0.6751108",
"0.6693934",
"0.65564436",
"0.6499122",
"0.6351091",
"0.6348619",
"0.62459064",
"0.61908615",
"0.61505693",
"0.6141869",
"0.6136945",
"0.6136352",
"0.61140954",
"0.608789",
"0.60551083",
"0.60050845",
"0.59680504",
"0.59571373",
"0.59189194",
"0.5881391",
"0.5876208",
"0.5874755",
"0.58706355",
"0.5863667",
"0.5850145",
"0.58384687",
"0.58353895",
"0.58065677"
] | 0.7972388 | 0 |
Test setting the agent name. | def test_set_agent_incorrect_value(self):
with pytest.raises(
ClickException,
match="Attribute `not_agent_name` is not allowed to be updated!",
):
self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", "agent.not_agent_name", "new_name"],
standalone_mode=False,
catch_exceptions=False,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_agent_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"agent.agent_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert result.output == \"Agent0\\n\"",
"def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)",
"def test_set_invalid_value(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"agent.agent_name\",\n \"true\",\n \"--type=bool\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def server_agent_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"server_agent_name\")",
"def test_set_name_return(self) -> None:\n\n actual = self.helper.set_name(self.test_name)\n\n self.assertIsInstance(actual, EnvironmentVariableHelper)",
"def set_object_name(self, agent, Name):\n\n self.send_ObjectName(agent, agent.agent_id, agent.session_id, {1:[self.LocalID, Name]})",
"def __init__(self, agent_name):\n\n self._agent_name = agent_name",
"def test_get_name(self):\n self.assertEqual(self.testcommand.get_name(), \"team\")",
"def test_change_name_of_the_devicetrue():",
"def test_set_name_method(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.set_name(given)\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"",
"def test_show_agent(self):\n with self.override_role():\n self.agents_client.show_agent(self.agent['id'])",
"def generate_agent_name():\n\n return '{0}-{1}'.format(\n defaults.CLOUDIFY_AGENT_PREFIX,\n uuid.uuid4())",
"def test_change_name_of_the_devicefalse():",
"def test_set_name_attribute(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.name = given\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def name(self, name: str):\n self.inst['targetname'] = name",
"def server_agent_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_agent_name\")",
"def test_name_set(self):\n name = \"Field Name\"\n field = basic.flag(name=name)\n\n self.assertEqual(name, field.name)\n\n self.assertEqual(name, field.name)",
"def is_java_agent(self):\r\n return self.has_label('java_agent')",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def set_name(self, name=\"\"):\n if isinstance(name, str):\n self.__name = name\n return 0\n print(\"type of nom is not STR\")\n return 1",
"def job_agent_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"job_agent_name\")",
"def set_name_for_actor(name, actor):\n key = StringKey.MakeKey(\"MeshName\", \"root\")\n i = vtk.vtkInformation()\n i.Set(key, name)\n actor.SetPropertyKeys(i)",
"def botname(self):\n return settings.AIM_USERNAME",
"def test_default_agent_port(self):\n options = ControlOptions()\n options.parseOptions([])\n self.assertEqual(options[\"agent-port\"], b'tcp:4524')",
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def _check_name(self):\n\t\tpass",
"def test_set_library_name(self):\n s1 = System()\n s1.set_library_name(\"Andreson\")\n self.assertEqual(s1.get_library_name(), \"Andreson\")",
"def the_user_changes_the_name_of_the_device(name):\n web_app.change_property_softassert(\"name\",name)"
] | [
"0.72085243",
"0.6366684",
"0.62835133",
"0.6257389",
"0.6257389",
"0.62132823",
"0.6131713",
"0.60440767",
"0.60348433",
"0.5927437",
"0.5916145",
"0.58946425",
"0.5865132",
"0.5844421",
"0.57391584",
"0.5722152",
"0.57040644",
"0.5700256",
"0.56795913",
"0.5665505",
"0.5655928",
"0.5632392",
"0.5624648",
"0.5613523",
"0.55860275",
"0.55763686",
"0.55761933",
"0.5556421",
"0.5545556",
"0.5524442"
] | 0.7100356 | 1 |
Test setting the 'dummy' skill name. | def test_set_skill_name_should_fail(self):
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", "skills.dummy.name", "new_dummy_name"],
standalone_mode=False,
)
assert result.exit_code == 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_skill_name(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy.name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"dummy\\n\"",
"def test_name_empty_string(self):\r\n self.name = \"\"",
"def fixture_microbial_sample_name():\n return \"microbial_name_test\"",
"def test_name_false(self):\r\n self.name = False",
"def test_change_name_without_name(self):\r\n self.client.login(username=self.student.username, password='test')\r\n change_name_url = self.get_url()\r\n resp = self.client.post(change_name_url, {\r\n 'new_name': '',\r\n 'rationale': 'change identity'\r\n })\r\n response_data = json.loads(resp.content)\r\n self.assertFalse(response_data['success'])",
"def test_set_name_through_init(self) -> None:\n\n given = self.test_name\n expected = given\n\n helper = EnvironmentVariableHelper(given)\n actual = helper.name\n\n self.assertEqual(expected, actual)",
"def test_change_name_of_the_devicefalse():",
"def test_name_detection(self):\n self.project.name = ''\n self.project.detect_name()\n self.assertEqual(\"Kobol's Last Gleaming\", self.project.name)",
"def test_change_name_of_the_devicetrue():",
"def test_with_only_names(self, do_student_launch, student_payload):\n del student_payload[\"email\"]\n\n response = do_student_launch()\n\n assert_launched_as_student(response)",
"def test_ask_yesno_no(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'nope'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'no')",
"def test_ask_yesno_yes(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'yes'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'yes')",
"def test_thingname_nostr(self, mock):\n mock.configure_mock(**(self.config_shadowget(ParamValidationError(\n report='UnitTest'))))\n self.assertRaises(\n ParamValidationError,\n lf.lambda_handler, event=self.lambdaevent, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()",
"def test_selection_name(self):\n skill = create_skill()\n skill.speak = mock.Mock()\n skill.get_response = mock.Mock()\n\n skill.get_response.return_value = 'octopus'\n\n options = ['a balloon', 'an octopus', 'a piano']\n response = skill.ask_selection(options, 'which is better')\n self.assertEqual(options[1], response)\n\n # Assert that the spoken sentence contains all options.\n spoken_sentence = skill.speak.call_args[0][0]\n for opt in options:\n self.assertTrue(opt in spoken_sentence)",
"def test_bad_name(self):\n\n request = service.get_request('GET', {u'taxon': u'Nosuchtaxonia'})\n x = self.start_request_tests(request)\n m = x.json().get(u'message')\n self.assertTrue(x.status_code >= 200)\n self.assertTrue('No Taxon matched\" in \"%s\"' % m)",
"def test_extra_default_codeword(self):\n self.alice.add_codeword(\"flugelhorn\", \"ranged\")\n\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 1, troop_type='muppet')\n self.assertEqual(s1.troop_type, \"infantry\")",
"def test_ask_yesno_other(self):\n skill = create_skill()\n skill.get_response = mock.Mock()\n skill.get_response.return_value = 'I am a fish'\n\n response = skill.ask_yesno('Do you like breakfast')\n self.assertEqual(response, 'I am a fish')",
"def test_name(self):\n inst = Amenity()\n self.assertTrue(hasattr(inst, \"name\"))\n self.assertEqual(inst.name, \"\")",
"def test_first_name(self, unromanized, romanized, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__first_name=unromanized,\n profile__romanized_first_name=romanized,\n )\n assert CDDWriter.first_name(profile) == expected",
"def test_set_name_method(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.set_name(given)\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def test_set_name_attribute(self) -> None:\n\n given = self.test_name\n expected = given\n\n self.helper.name = given\n\n actual = self.helper.name\n\n self.assertEqual(expected, actual)",
"def test_dummy():",
"def test_workon_name(self):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5, name=\"voici\"\n )\n\n assert experiment.name == \"voici\"",
"def testName(self):\n dis_meta = DiseaseMeta()\n\n self.util.stringTypeTest(self, dis_meta, \"name\")\n\n self.util.stringPropertyTest(self, dis_meta, \"name\")",
"def test_name(self):\n insta = Amenity()\n self.assertTrue(hasattr(insta, \"name\"))\n self.assertEqual(insta.name, \"\")",
"def test_default_codeword(self):\n battle = self.battle\n s1 = battle.create_skirmish(self.alice, 1, troop_type='muppet')\n self.assertEqual(s1.troop_type, \"infantry\")",
"def test_name_set(self):\n name = \"Field Name\"\n field = basic.flag(name=name)\n\n self.assertEqual(name, field.name)\n\n self.assertEqual(name, field.name)",
"def test_legal_names(self):\r\n products = generate_products()\r\n for product in products:\r\n test_adjective, test_noun = product.name.split(\"_\")\r\n self.assertIn(test_adjective, ADJECTIVES)\r\n self.assertIn(test_noun, NOUNS)",
"def test_thingname_nokey(self, mock):\n self.assertRaises(\n KeyError,\n lf.lambda_handler, event=self.lambdaevent_nokey, context=None)\n mock.client.return_value.update_thing_shadow.assert_not_called()",
"def test_legal_names(self):\n adjectives = ['Awesome', 'Shiny', 'Impressive', 'Portable', 'Improved']\n nouns = ['Anvil', 'Catapult' 'Disguise' 'Mousetrap', '???']\n products = acme_report.generate_products()\n for prod in range(len(products)):\n prod_name = products[prod].name\n name_split = prod_name.split()\n self.assertIn(name_split[0], adjectives)\n self.assertIn(name_split[1], nouns)"
] | [
"0.7661072",
"0.6244207",
"0.61193323",
"0.6040593",
"0.59651697",
"0.5919205",
"0.5802785",
"0.5780799",
"0.5780309",
"0.5751823",
"0.57467544",
"0.5730054",
"0.57029426",
"0.5673183",
"0.5667184",
"0.56606334",
"0.5655326",
"0.56511235",
"0.5628001",
"0.5626317",
"0.56261504",
"0.560481",
"0.5593023",
"0.5590323",
"0.5576531",
"0.5537359",
"0.5523387",
"0.5498324",
"0.5490404",
"0.5487239"
] | 0.8387392 | 0 |
Test setting a nested attribute. | def test_set_nested_attribute(self):
path = "skills.dummy.behaviours.dummy.args.behaviour_arg_1"
new_value = "10" # cause old value is int
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", path, new_value],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", path],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
assert new_value in result.output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_fails_when_setting_nested_object(self):\n with pytest.raises(\n ClickException,\n match=r\"Attribute `non_existing_attribute.dummy` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"skills.dummy.non_existing_attribute.dummy\",\n \"new_value\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_set_nested_attribute_not_allowed(self):\n path = \"skills.dummy.behaviours.dummy.config.behaviour_arg_1\"\n new_value = \"new_dummy_name\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Attribute `behaviours.dummy.config.behaviour_arg_1` is not allowed to be updated!\"\n )",
"def test_attribute_setters(self):\n test = self.test\n test.id = 2\n test['name'] = 'bar'\n\n self.assertEqual(test.id, 2)\n self.assertEqual(test['name'], 'bar')",
"def test_set_attr(self):\n self.my_city.name = \"Denver\"\n self.assertEqual(self.my_city.name, \"Denver\")",
"def test_update_attribute_data(self):\n pass",
"def test_set_attribute():\n elem = hr.Element(\"this is some text\", id=\"spam\", style=\"eggs\")\n elem.set_attributes(holy=\"grail\", answer=42)\n\n assert (\n get_opening_line(elem)\n == '<html id=\"spam\" style=\"eggs\" holy=\"grail\" answer=\"42\">'\n )",
"def testSetAttributeAction(self):\n\t action = SetAttributeAction('x', 'y', ('key',), 'z')\n\t self.failUnless(action.field == 'y')\n\t self.failUnless(action.value == 'z')",
"def test_getter_child_attr(self):\n root = netapp_api.NaElement('root')\n root.add_attr('val', 'FAKE_VALUE')\n\n self.assertEqual('FAKE_VALUE',\n root.__getitem__('val'))",
"def setattr_nested(obj, attributes, value):\n pre, _, post = attributes.rpartition(\".\")\n setattr(getattr_nested(obj, pre) if pre else obj, post, value)",
"def test_adding_attributes(self):\n self.assertEqual(self.compound.get_attribute(\"What\"), \"Everything\")",
"def test_setAttribute():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.setAttribute();\n x.setAttribute(\"foo\");\n x.setAttribute(\"foo\", \"bar\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.setAttribute(\"onfoo\", \"bar\");\n \"\"\").failed()",
"def test_one_att(self):\n self.test_attribute.is_down = mock.Mock(return_value=False)\n self.run_mock_analyzer([self.test_attribute, ])\n self.assert_mock_analyzer(self.test_attribute)",
"def set_nested_attr(__obj: object, __name: str, __value: Any):\n pre, _, post = __name.rpartition('.')\n return setattr(get_nested_attr(__obj, pre) if pre else __obj, post, __value)",
"def testSetParent(self):\n def setParent():\n self.node.parent = 'banana'\n\n self.assertRaises(\n AttributeError,\n setParent\n )",
"def testSetParent(self):\n def setParent():\n self.node.parent = 'banana'\n\n self.assertRaises(\n AttributeError,\n setParent\n )",
"def test_attribute_access(self):\n cd = ConfigDict()\n\n cd['x'] = 1\n self.assertEquals(cd.x, 1)\n\n cd.y = 2\n self.assertEquals(cd['y'], 2)",
"def test_name_attribute_assignment(self):\n self.assertNotIn('aldous', self.__dict__)\n self.aldous\n self.assertIn('aldous', self.__dict__)\n self.assertIs(self.__dict__['aldous'], self.aldous)",
"def test_set_attrs(self):\n city2 = City()\n city2.name = \"Hawaii\"\n self.assertEqual(city2.name, \"Hawaii\")\n city2.state_id = \"<3\"\n self.assertEqual(city2.state_id, \"<3\")\n self.assertEqual(City.name, \"\")\n self.assertEqual(City.state_id, \"\")",
"def test_data_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.data = 0",
"def test_attribute(self):\n xp = XPathQuery(\"/foo[@attrib1]\")\n self.assertEqual(xp.matches(self.e), True)",
"def test_set_with_deep_key_path_with_string():\n deep_key_path = 'deep.key.path'\n test_value = 'deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('deep'), dict)\n assert config.get(deep_key_path) == test_value",
"def test_attributeWithValue(self):\n xp = XPathQuery(\"/foo[@attrib1='value1']\")\n self.assertEqual(xp.matches(self.e), 1)",
"def test_update_metadata_by_attribute(self):\n pass",
"def test_parent_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.parent = None",
"def test_bad_attribute_access(self):\n test = self.test\n\n self.assertRaises(AttributeError, test.__getattr__, 'poop')\n # test.poop = 'foo' should set a new object attr 'poop'\n self.assertRaises(KeyError, test.__getitem__, 'poop')\n self.assertRaises(KeyError, test.__setitem__, 'poop', 'foo')",
"def test_register_existing_attr(self):\n pass",
"def test_get_attribute_data(self):\n pass",
"def test_attr_type(self):\n self.my_city.state_id = \"1c5dd90a-a3df-4516-b1ac-32a8715e5539\"\n self.my_city.name = \"New York\"\n self.assertIsInstance(self.my_city.name, str)\n self.assertIsInstance(self.my_city.state_id, str)",
"def test_setter_child_dict(self):\n root = netapp_api.NaElement('root')\n root['d'] = {'e1': 'v1', 'e2': 'v2'}\n e1 = root.get_child_by_name('d')\n self.assertIsInstance(e1, netapp_api.NaElement)\n sub_ch = e1.get_children()\n self.assertEqual(len(sub_ch), 2)\n for c in sub_ch:\n self.assertIn(c.get_name(), ['e1', 'e2'])\n if c.get_name() == 'e1':\n self.assertEqual(c.get_content(), 'v1')\n else:\n self.assertEqual(c.get_content(), 'v2')",
"def set_value(node, attr, attr_data, verbose=False):\n\n keyable = attr_data.get('keyable')\n non_keyable = attr_data.get('non_keyable')\n value = attr_data.get('value')\n attr_type = attr_data.get('type')\n\n excluded_types = ['float2', 'float3', 'double2', 'double3',\n 'compound', 'message', 'short3', 'long2', 'long3']\n try:\n if not mc.objExists(node+'.'+attr):\n if verbose:\n mc.warning('# Attr {0}.{1} doe not exist! Skipping..'.format(node, attr))\n return\n\n elif attr_type in excluded_types:\n return\n\n elif attr_type == 'string':\n if not value:\n value = ''\n mc.setAttr(node+'.'+attr, value, type='string')\n\n else:\n mc.setAttr(node+'.'+attr, value)\n\n if verbose:\n print 'Set attribute value: '+node+'.'+attr\n\n except:\n if verbose:\n mc.warning('Could not set '+attr_type+' attr value :'+node+'.'+attr)"
] | [
"0.7471137",
"0.6970188",
"0.656537",
"0.6477467",
"0.63603884",
"0.61522514",
"0.6127132",
"0.6091757",
"0.6082865",
"0.6073286",
"0.60494506",
"0.59865016",
"0.59510404",
"0.59033054",
"0.59033054",
"0.5884618",
"0.5871959",
"0.58645827",
"0.58420926",
"0.5816876",
"0.5816365",
"0.58151364",
"0.57993424",
"0.57891285",
"0.5776685",
"0.5766262",
"0.5751023",
"0.5744841",
"0.5743418",
"0.5729915"
] | 0.7355271 | 1 |
Test that setting the 'dummy' skill behaviours fails because the attribute is not a primitive type. | def test_set_fails_when_setting_non_primitive_type(self):
with pytest.raises(
ClickException, match="Attribute `behaviours` is not allowed to be updated!"
):
self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", "skills.dummy.behaviours", "value"],
standalone_mode=False,
catch_exceptions=False,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dummy(self):\n pass",
"def test_handler_no_type_hints(self):\n with self.assertRaises(ValueError):\n\n @intent_handler\n def decorated_test(context, param):\n return None",
"def test_dispatch_missing(self):\n self.skill.logic = {}\n self.assertRaises(KeyError, self.skill.dispatch)",
"def testTypeSingle(self):\n prop = make_prop(kind=bool)\n with self.assertRaises(TypeError):\n prop.interpret(1, {})\n\n self.assertEqual(True, prop.interpret(True, {}))",
"def test_handler_no_type_hints_param(self):\n with self.assertRaises(ValueError):\n\n @intent_handler\n def decorated_test(param):\n return None",
"def testTheType(self, theTestType):\n \n pass",
"def testPowerBadType(self):\n def setPower():\n self.cc.power = 'ban'\n\n self.assertRaises(\n TypeError,\n setPower\n )",
"def test_dummy():",
"def _dummy(*args, **kwargs):\n pass",
"def test_patch_none():",
"def test_set_skill_name_should_fail(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy.name\", \"new_dummy_name\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def test_no_source():\n assert get_type_hints(int) == {}",
"def test_handle(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.act()",
"def test_default_product_stealability(self):\n prod = Product('Test Product')\n self.assertEqual(prod.stealability(), \"Kinda stealable.\")",
"def test_no_bleedthrough():\n\n @type_checked\n def _run_test(*args, ok:int, then:float, well:bool, **kwargs:str):\n assert args == (\"12\", 4, None, 19.9)\n assert ok == 90\n assert then == 17.2\n assert well is True\n assert kwargs == {\"one\": \"111\", \"two\": \"22.2\"}\n\n _run_test(\"12\", 4, None, 19.9, ok=\"90\", then=\"17.2\", well=\"True\", one=111,\n two=22.2)",
"def test_badyvaluewithbools(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, True, 3)\n self.assertEqual(str(e.exception), 'y must be an integer')",
"def test_sample_one_sample_type(self):\r\n self.assertEqual(self.test_sample.sampleType, 'TUMOUR')",
"def test_is_not_missed():\n game = Game()\n game.word = 'word'\n assert game.is_missed('w') is False",
"def test_return_types():\n my_method = SGMethod(\"Test\")\n \n my_method.return_type = \"SoundEffect\"\n assert my_method.return_type == \"SoundEffect\"",
"def test(self) -> Any:\n pass",
"def testNoSpecialties(self):\n self.failUnlessEqual(self.person.getSpecialties(), [])",
"def allow(self, test):\n raise NotImplementedError()",
"def test_default_sound_system(self):\n\n self.assertFalse(self.mc.machine_config['sound_system']['enabled'])\n self.assertIsNone(self.mc.sound_system)",
"def test_dummy_test():\n pass",
"def setUp(self):\n self.false_int = \"A\"",
"def test_bad_property_setting(self):\n s = State(substance=\"water\")\n with pytest.raises(AttributeError):\n # Should be lowercase p\n s.TP = Q_(400.0, \"K\"), Q_(101325.0, \"Pa\")",
"def _dummy(*args, **kwargs):\n err_str = \"\"\n raise NotImplementedError()",
"def test_work_without_activity(human):\n with pytest.raises(AttributeError):\n human.work()",
"def test_badxvaluewithbools(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, False, 2, 3)\n self.assertEqual(str(e.exception), 'x must be an integer')",
"def test_theft_and_stealing(self):"
] | [
"0.611732",
"0.5922523",
"0.5780935",
"0.577883",
"0.57609355",
"0.57075256",
"0.56813645",
"0.56792086",
"0.55930203",
"0.5585353",
"0.5578745",
"0.55738044",
"0.55481416",
"0.5526285",
"0.5522617",
"0.55186546",
"0.55089563",
"0.54697376",
"0.54681265",
"0.5462291",
"0.5453334",
"0.54506284",
"0.544105",
"0.5422537",
"0.54190695",
"0.53859216",
"0.5379288",
"0.5372681",
"0.53722334",
"0.5362338"
] | 0.7457731 | 0 |
Test that setting a nested object in the 'dummy' skill fails because the path is not valid. | def test_get_fails_when_setting_nested_object(self):
with pytest.raises(
ClickException,
match=r"Attribute `non_existing_attribute.dummy` is not allowed to be updated!",
):
self.runner.invoke(
cli,
[
*CLI_LOG_OPTION,
"config",
"set",
"skills.dummy.non_existing_attribute.dummy",
"new_value",
],
standalone_mode=False,
catch_exceptions=False,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_utils_set_dict_value_from_path_creating_new_fields():\n dictionary = {}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}",
"def test_set_nested_attribute_not_allowed(self):\n path = \"skills.dummy.behaviours.dummy.config.behaviour_arg_1\"\n new_value = \"new_dummy_name\"\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"Attribute `behaviours.dummy.config.behaviour_arg_1` is not allowed to be updated!\"\n )",
"def test_utils_set_dict_value_from_path_updating_fields():\n dictionary = {\"foo\": {\"bar\": \"bar_value\"}}\n ralph_utils.set_dict_value_from_path(dictionary, [\"foo\", \"bar\"], \"baz\")\n assert dictionary == {\"foo\": {\"bar\": \"baz\"}}",
"def test_set_nested_attribute(self):\n path = \"skills.dummy.behaviours.dummy.args.behaviour_arg_1\"\n new_value = \"10\" # cause old value is int\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", path],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert new_value in result.output",
"def test_set_with_shallow_path():\n shallow_key_path = 'shallow_key_path'\n test_value = 'shallow key path value'\n\n config.set(shallow_key_path, test_value)\n assert config.get(shallow_key_path) == test_value",
"def test_get_fails_when_getting_nested_object(self):\n with pytest.raises(\n ClickException, match=r\"Attribute `.* for .* config does not exist\"\n ):\n self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"skills.dummy.non_existing_attribute.dummy\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_set_with_deep_key_path_with_string():\n deep_key_path = 'deep.key.path'\n test_value = 'deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('deep'), dict)\n assert config.get(deep_key_path) == test_value",
"def test_nested_objf(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"bc\":2}}')\n self.assertFalse(check_json_object(jdic, jobj))",
"def test_invoke_invalid_object(mock_boto3_client, mock_boto3_resource):\n from odl_datalake_ingestion import lambda_handler\n mock_context = MockContext()\n mock_event[\"Records\"][0][\"s3\"][\"object\"][\"key\"] = \"this/path/doesnt/exist.ext\"\n lambda_handler(mock_event, mock_context)",
"def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent\", \"data\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"skills.dummy\", \"value\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )",
"def test_nested_obj(self):\n jobj = JObject(keys = ['status', JObject(parent = 'nest', keys= ['a','b']), \n 'result'])\n jdic = json.loads('{\"status\": \"success\", \"result\": \"yes\", \"nest\": {\"a\":1,\"b\":2}}')\n self.assertTrue(check_json_object(jdic, jobj))",
"def test_add_path(self):\n path = 'C:\\\\test\\\\'\n info = self.api.add_path(path, tags=['asd'])\n self.assertEqual(info['value'], path)\n tags = [t['name'] for t in info['tags']]\n self.assertEqual(tags, ['asd'])",
"def testInitialize(self):\n path_spec = tsk_path_spec.TSKPathSpec(\n location=u'/test', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = tsk_path_spec.TSKPathSpec(\n data_stream=u'test', location=u'/test', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = tsk_path_spec.TSKPathSpec(\n inode=1, parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = tsk_path_spec.TSKPathSpec(\n location=u'/test', inode=1, parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(location=u'/test', parent=None)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(location=None, parent=self._path_spec)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(inode=None, parent=self._path_spec)\n\n with self.assertRaises(ValueError):\n _ = tsk_path_spec.TSKPathSpec(\n location=u'/test', parent=self._path_spec, bogus=u'BOGUS')",
"def test_too_short_path_but_root_correct(self):\n result = self.runner.invoke(\n cli, [*CLI_LOG_OPTION, \"config\", \"get\", \"agent\"], standalone_mode=False\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )\n\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", \"skills.dummy\"],\n standalone_mode=False,\n )\n assert result.exit_code == 1\n assert (\n result.exception.message\n == \"The path is too short. Please specify a path up to an attribute name.\"\n )",
"def test_set_with_deep_key_path_with_list():\n deep_key_path = ('second', 'deep', 'key', 'path')\n test_value = 'second deep key path value'\n\n config.set(deep_key_path, test_value)\n assert isinstance(config.get('second'), dict)\n assert config.get(deep_key_path) == test_value",
"def test_set_without_path_sets_the_root(self):\n mock_config = {'foo': 'bar'}\n root_config = Config()\n root_config.set(value=mock_config)\n self.assertDictEqual(root_config.get(), mock_config)",
"def test_access_nested_map(self, nested_map, path, result):\n self.assertEqual(access_nested_map(nested_map, path), result)",
"def test_access_nested_map_exception(self, nested_map, path):\n with self.assertRaises(KeyError) as error:\n access_nested_map(nested_map, path)\n self.assertEqual(error.exception.args[0], path[-1])",
"def test_json_error(self):\n with self.assertRaises(AttributeError):\n FileStorage.__objects\n FileStorage.__File_Path",
"def test_nested(cls, value, res):\n\tobj = cls(value, DEFAULT_POD)\n\tassert obj == res",
"def test_set_item(self):\n content = json.dumps({\n \"nb\": \"test-nb\",\n \"en\": \"test-en\",\n })\n structure = MultiLingualTextStructure(content, use_default_for_empty=True)\n\n self.assertEqual(structure[\"nb\"], \"test-nb\")\n self.assertEqual(structure[\"en\"], \"test-en\")\n structure[\"nb\"] = \"changed-nb\"\n self.assertEqual(structure[\"nb\"], \"changed-nb\")\n self.assertEqual(structure[\"en\"], \"test-en\")",
"def test_parent_read_only():\n t = Tree(None)\n with pytest.raises(AttributeError):\n t.parent = None",
"def test_init_with_nested_dicts(self):\n regex = 'mappings can not be nested'\n with self.assertRaisesRegex(ValueError, regex):\n query = DataQuery({'A': {'B': 'C'}}, D='x')",
"def test_set_item_from_outside(self):\n\n expected = {\n self.file_to_test: {\n \"hello.world\": {\n \"included_at_epoch\": 190.0,\n \"included_at_iso\": \"1970-01-01T01:03:10\",\n \"last_retested_at_epoch\": 190.0,\n \"last_retested_at_iso\": \"1970-01-01T01:03:10\",\n \"status\": PyFunceble.STATUS.official.invalid,\n },\n \"world.hello\": {\n \"included_at_epoch\": 0.0,\n \"included_at_iso\": \"1970-01-01T01:00:00\",\n \"last_retested_at_epoch\": 0.0,\n \"last_retested_at_iso\": \"1970-01-01T01:00:00\",\n \"status\": PyFunceble.STATUS.official.down,\n },\n },\n }\n\n self.inactive_db.database = {\n self.file_to_test: {\n \"world.hello\": {\n \"included_at_epoch\": 0.0,\n \"included_at_iso\": \"1970-01-01T01:00:00\",\n \"last_retested_at_epoch\": 0.0,\n \"last_retested_at_iso\": \"1970-01-01T01:00:00\",\n \"status\": PyFunceble.STATUS.official.down,\n },\n },\n }\n\n self.inactive_db[\"hello.world\"] = {\n \"included_at_epoch\": 190.0,\n \"included_at_iso\": \"1970-01-01T01:03:10\",\n \"last_retested_at_epoch\": 190.0,\n \"last_retested_at_iso\": \"1970-01-01T01:03:10\",\n \"status\": PyFunceble.STATUS.official.invalid,\n }\n\n self.assertEqual(expected, self.inactive_db.database)",
"async def test_update_with_json_attrs_with_json_attrs_path(hass: HomeAssistant) -> None:\n\n respx.get(\"http://localhost\").respond(\n status_code=HTTPStatus.OK,\n json={\n \"toplevel\": {\n \"master_value\": \"123\",\n \"second_level\": {\n \"some_json_key\": \"some_json_value\",\n \"some_json_key2\": \"some_json_value2\",\n },\n },\n },\n )\n assert await async_setup_component(\n hass,\n SENSOR_DOMAIN,\n {\n SENSOR_DOMAIN: {\n \"platform\": DOMAIN,\n \"resource\": \"http://localhost\",\n \"method\": \"GET\",\n \"value_template\": \"{{ value_json.toplevel.master_value }}\",\n \"json_attributes_path\": \"$.toplevel.second_level\",\n \"json_attributes\": [\"some_json_key\", \"some_json_key2\"],\n \"name\": \"foo\",\n \"unit_of_measurement\": UnitOfInformation.MEGABYTES,\n \"verify_ssl\": \"true\",\n \"timeout\": 30,\n \"headers\": {\"Accept\": \"text/xml\"},\n }\n },\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 1\n state = hass.states.get(\"sensor.foo\")\n\n assert state.state == \"123\"\n assert state.attributes[\"some_json_key\"] == \"some_json_value\"\n assert state.attributes[\"some_json_key2\"] == \"some_json_value2\"",
"def test_circular_nested(self):\n obj = {}\n obj[\"list\"] = [{\"obj\": obj}]\n with self.assertRaises(orjson.JSONEncodeError):\n orjson.dumps(obj)",
"def testDirectorySetBadType(self):\n def setDirectory():\n self.mr.directory = 12345\n\n self.assertRaises(\n TypeError,\n setDirectory\n )",
"def testInitialize(self):\n path_spec = apm_path_spec.APMPathSpec(parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = apm_path_spec.APMPathSpec(\n location='/apm2', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = apm_path_spec.APMPathSpec(\n entry_index=1, parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n path_spec = apm_path_spec.APMPathSpec(\n entry_index=1, location='/apm2', parent=self._path_spec)\n\n self.assertIsNotNone(path_spec)\n\n with self.assertRaises(ValueError):\n apm_path_spec.APMPathSpec(parent=None)\n\n with self.assertRaises(ValueError):\n apm_path_spec.APMPathSpec(\n parent=self._path_spec, bogus='BOGUS')",
"def test_settings_items(mock_empty_os_environ):\n climate = core.Climate()\n climate.update({\"a\": {\"b\": {\"c\": [1, 2, 3]}}, \"d\": [{\"e\": \"f\"}, {\"g\": \"h\"}]})\n assert climate.settings[\"a\"] == {\"b\": {\"c\": [1, 2, 3]}}\n assert climate.settings.a == {\"b\": {\"c\": [1, 2, 3]}}\n assert climate.settings.a.b.c[0] == 1\n\n # test assignment\n for value in [{\"new\": \"data\"}, \"blaaa\", [3, 4, 5]]:\n with pytest.raises(TypeError):\n climate.settings.a.b.c = value\n climate.update({\"a\": {\"b\": {\"c\": value}}})\n assert climate.settings.a.b.c == value\n\n for value in [{\"new\": \"data\"}, \"blaaa\", 100]:\n with pytest.raises(TypeError):\n climate.settings.a.b.c[0] = value\n climate.update({\"a\": {\"b\": {\"c\": [value]}}})\n assert climate.settings.a.b.c[0] == value\n\n # test deletion\n with pytest.raises(TypeError):\n del climate.settings.a.b[\"c\"]\n climate.update({\"a\": {\"b\": {\"c\": core.REMOVED}}})\n assert climate.settings.a.b == {}\n climate.update()\n assert climate.settings.a.b == {}\n\n # test attribute deletion\n with pytest.raises(TypeError):\n del climate.settings.d[0].e\n climate.update({\"d\": [{\"e\": core.REMOVED}]})\n assert climate.settings.d == [{}, {\"g\": \"h\"}]\n climate.update()\n assert climate.settings.d == [{}, {\"g\": \"h\"}]\n\n # test sequence item deletion\n climate.update({\"d\": [core.REMOVED]})\n assert climate.settings.d == [{\"g\": \"h\"}]\n climate.update()\n assert climate.settings.d == [{\"g\": \"h\"}]\n\n # test second deletion at index to make sure that it is applied after the previous deletion\n climate.update({\"d\": [core.REMOVED]})\n assert climate.settings.d == []\n climate.update()\n assert climate.settings.d == []",
"def test_set_path_4(self, verify_path2_mock):\n test_file = Path(\"/dir1/dir2/../file.txt\")\n verify_path2_mock.return_value = (True, None)\n output = basic.set_path(test_file, kind=\"file\", expect=True)\n exp = Path(\"/dir1/file.txt\")\n self.assertEqual(output, exp)"
] | [
"0.65963215",
"0.6569621",
"0.64509624",
"0.6348378",
"0.61588395",
"0.5966255",
"0.5864911",
"0.5818894",
"0.57906985",
"0.57551765",
"0.57298034",
"0.5715899",
"0.570213",
"0.5672622",
"0.5666781",
"0.566096",
"0.56185013",
"0.56091845",
"0.55812943",
"0.55795157",
"0.5563023",
"0.5555255",
"0.55440736",
"0.5526201",
"0.54905695",
"0.54895663",
"0.5485383",
"0.5475001",
"0.54728854",
"0.5465715"
] | 0.6992566 | 0 |
Test that a component value is updated in the agent config, not in the component config. | def test_set_get_correct_path(self):
agent_config = self.load_agent_config()
assert not agent_config.component_configurations
config_value = self.get_component_config_value()
assert config_value == self.INITIAL_VALUE
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", self.PATH],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
assert str(self.INITIAL_VALUE) in result.output
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "set", self.PATH, str(self.NEW_VALUE)],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
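        # the component's own config is untouched; the override is stored in the agent config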
config_value = self.get_component_config_value()
assert config_value == self.INITIAL_VALUE
result = self.runner.invoke(
cli,
[*CLI_LOG_OPTION, "config", "get", self.PATH],
standalone_mode=False,
catch_exceptions=False,
)
assert result.exit_code == 0
assert str(self.NEW_VALUE) in result.output
agent_config = self.load_agent_config()
assert agent_config.component_configurations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_custom_configuration_updated(self):\n component_protocol_id = ComponentId(\n ComponentType.PROTOCOL, self.new_protocol_id\n )\n component_contract_id = ComponentId(\n ComponentType.CONTRACT, self.new_contract_id\n )\n component_connection_id = ComponentId(\n ComponentType.CONNECTION, self.new_connection_id\n )\n component_skill_id = ComponentId(ComponentType.SKILL, self.new_skill_id)\n\n assert (\n self.agent_config.component_configurations[component_protocol_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_contract_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_connection_id]\n == self.expected_custom_component_configuration\n )\n assert (\n self.agent_config.component_configurations[component_skill_id]\n == self.expected_custom_component_configuration\n )",
"def test_component_set_successful(self):\n # We create an instance of the panel so we can check existing values\n panel = ComponentAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_component_list(), self.default['component'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'component',\n ','.join(self.new['component']))\n # create component_owner option\n self.env.config.set('ticket-field-config','component_owner','test')\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_component_list(), self.new['component'])",
"def test_component_configuration_removed_from_agent_config(self):\n with cd(self._get_cwd()):\n self.run_cli_command(\n \"add\", \"--local\", self.ITEM_TYPE, str(self.ITEM_PUBLIC_ID)\n )\n self.run_cli_command(\"add\", \"--local\", \"connection\", \"fetchai/http_server\")\n\n self.runner.invoke(\n cli,\n [\n \"config\",\n \"set\",\n \"vendor.fetchai.connections.soef.config.api_key\",\n \"some_api_key\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )\n self.runner.invoke(\n cli,\n [\n \"config\",\n \"set\",\n \"vendor.fetchai.connections.http_server.config.port\",\n \"9000\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )\n config = self.load_config()\n assert config.component_configurations\n assert (\n PackageId(self.ITEM_TYPE, self.ITEM_PUBLIC_ID)\n in config.component_configurations\n )\n\n self.run_cli_command(\"remove\", self.ITEM_TYPE, str(self.ITEM_PUBLIC_ID))\n\n config = self.load_config()\n assert (\n PackageId(self.ITEM_TYPE, self.ITEM_PUBLIC_ID)\n not in config.component_configurations\n )\n assert config.component_configurations",
"def test_set_existing_property():\n\n value = 'new'\n\n contents = (\"[Info]\\n\"\n \"sdk = old\")\n\n testutils.deploy_config_raw(contents)\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()\n\n return 0",
"def test_config_update(get_config):\n cfg = get_config(Config, {'test': 'main'})\n update_from = {\"name\": \"new_name\"}\n cfg.update(update_from)\n\n assert cfg.data.get('name') == \"new_name\", \"config was not updated\"",
"def test_agent_config_updated(self):\n loader = ConfigLoader.from_configuration_type(PackageType.AGENT)\n with Path(self._get_cwd(), DEFAULT_AEA_CONFIG_FILE).open() as fp:\n agent_config = loader.load(fp)\n assert DefaultMessage.protocol_id in agent_config.protocols\n assert ERROR_SKILL_PUBLIC_ID in agent_config.skills",
"def test_set_invalid_value(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"set\",\n \"agent.agent_name\",\n \"true\",\n \"--type=bool\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 1",
"def test_config_change():\n clean_tables()\n config = set_configuration()\n assert config['age']['value'] == \"72\"\n assert config['retainUnsent']['value'] == \"False\" \n\n config = update_configuration(age=0, retain_unsent=True) \n assert config['age']['value'] == \"0\" \n assert config['retainUnsent']['value'] == \"True\"\n\n clean_tables()",
"def test_test_property():\n\n contents = (\"[Info]\\n\"\n \"sdk = 23\")\n\n testutils.deploy_config_raw(contents)\n\n assert prop.test_prop('info', 'sdk') == 1\n\n testutils.undeploy()\n\n return 0",
"def test_update_node_driveconfig(self):\n pass",
"def test_set_new_property():\n\n value = '1'\n contents = (\"[info]\\n\"\n \"real = not_real\")\n\n testutils.deploy_config_raw(contents)\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()",
"def test_update_deployment_state(self):\n pass",
"async def test_full_config(hass, mock_client):\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": [\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()\n assert hass.bus.listen.called\n assert hass.bus.listen.call_args_list[0][0][0] == EVENT_STATE_CHANGED",
"def test_set_nested_attribute(self):\n path = \"skills.dummy.behaviours.dummy.args.behaviour_arg_1\"\n new_value = \"10\" # cause old value is int\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", path, new_value],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n result = self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"get\", path],\n standalone_mode=False,\n catch_exceptions=False,\n )\n assert result.exit_code == 0\n assert new_value in result.output",
"def test_set_property_success(self):\r\n self.config.option1 = 9001\r\n self.assertEqual(self.config.values['option1'], 9001)\r\n\r\n self.config.option2 = 'bar'\r\n self.assertEqual(self.config.values['option2'], 'bar')",
"async def test_full_config(hass: HomeAssistant, mock_client) -> None:\n config = {\n prometheus.DOMAIN: {\n \"namespace\": \"ns\",\n \"default_metric\": \"m\",\n \"override_metric\": \"m\",\n \"requires_auth\": False,\n \"component_config\": {\"fake.test\": {\"override_metric\": \"km\"}},\n \"component_config_glob\": {\"fake.time_*\": {\"override_metric\": \"h\"}},\n \"component_config_domain\": {\"climate\": {\"override_metric\": \"°C\"}},\n \"filter\": {\n \"include_domains\": [\"climate\"],\n \"include_entity_globs\": [\"fake.time_*\"],\n \"include_entities\": [\"fake.test\"],\n \"exclude_domains\": [\"script\"],\n \"exclude_entity_globs\": [\"climate.excluded_*\"],\n \"exclude_entities\": [\"fake.time_excluded\"],\n },\n }\n }\n assert await async_setup_component(hass, prometheus.DOMAIN, config)\n await hass.async_block_till_done()",
"def test_update_state(self):\n pass",
"def test_config_changed_non_leader(\n self,\n ) -> NoReturn:\n self.harness.set_leader(is_leader=False)\n self.harness.charm.on.config_changed.emit()\n\n # Assertions\n self.assertIsInstance(self.harness.charm.unit.status, ActiveStatus)",
"def test_update_reg_ex_config(self):\n pass",
"async def test_value_updated(\n hass: HomeAssistant, vision_security_zl7432, integration, client\n) -> None:\n node = vision_security_zl7432\n # Add states to the value we are updating to ensure the translation happens\n node.values[\"7-37-1-currentValue\"].metadata.data[\"states\"] = {\"1\": \"on\", \"0\": \"off\"}\n events = async_capture_events(hass, \"zwave_js_value_updated\")\n\n event = Event(\n type=\"value updated\",\n data={\n \"source\": \"node\",\n \"event\": \"value updated\",\n \"nodeId\": 7,\n \"args\": {\n \"commandClassName\": \"Switch Binary\",\n \"commandClass\": 37,\n \"endpoint\": 1,\n \"property\": \"currentValue\",\n \"newValue\": 1,\n \"prevValue\": 0,\n \"propertyName\": \"currentValue\",\n },\n },\n )\n\n node.receive_event(event)\n # wait for the event\n await hass.async_block_till_done()\n assert len(events) == 1\n assert events[0].data[\"home_id\"] == client.driver.controller.home_id\n assert events[0].data[\"node_id\"] == 7\n assert events[0].data[\"entity_id\"] == \"switch.in_wall_dual_relay_switch\"\n assert events[0].data[\"command_class\"] == CommandClass.SWITCH_BINARY\n assert events[0].data[\"command_class_name\"] == \"Switch Binary\"\n assert events[0].data[\"endpoint\"] == 1\n assert events[0].data[\"property_name\"] == \"currentValue\"\n assert events[0].data[\"property\"] == \"currentValue\"\n assert events[0].data[\"value\"] == \"on\"\n assert events[0].data[\"value_raw\"] == 1\n\n # Try a value updated event on a value we aren't watching to make sure\n # no event fires\n event = Event(\n type=\"value updated\",\n data={\n \"source\": \"node\",\n \"event\": \"value updated\",\n \"nodeId\": 7,\n \"args\": {\n \"commandClassName\": \"Basic\",\n \"commandClass\": 32,\n \"endpoint\": 1,\n \"property\": \"currentValue\",\n \"newValue\": 1,\n \"prevValue\": 0,\n \"propertyName\": \"currentValue\",\n },\n },\n )\n\n node.receive_event(event)\n # wait for the event\n await hass.async_block_till_done()\n # We should only still have captured one event\n assert len(events) == 1",
"def test_update(self):\n # this is tested graphically, as it is UI\n pass",
"def test_get_current_component_status_DISABLED(self):\n self._ucr({\n 'repository/online/component/a': 'no',\n })\n ORIG = UU.FN_UPDATER_APTSOURCES_COMPONENT\n try:\n tmp = NamedTemporaryFile()\n UU.FN_UPDATER_APTSOURCES_COMPONENT = tmp.name\n self.assertEqual(UU.COMPONENT_DISABLED, self.u.get_current_component_status('a'))\n finally:\n UU.FN_UPDATER_APTSOURCES_COMPONENT = ORIG\n tmp.close()",
"def test_configure_override_field_values(ExampleComponentClass):\n\n x = ExampleComponentClass()\n configure(x, {\"a\": 0, \"b\": \"bar\"})\n assert x.a == 0\n assert x.b == \"bar\"",
"def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)",
"def test_component_update_available_NO(self):\n self.assertFalse(self.u.component_update_available())",
"def test_component_update_available_NEW(self):\n MockPopen.mock_stdout = 'Inst b (new from)'\n self.assertTrue(self.u.component_update_available())",
"def test_set_new_section_property():\n\n value = '1'\n testutils.deploy_config_raw(\"\")\n\n prop.set_prop('info', 'sdk', value)\n assert prop.get_prop('info', 'sdk') == value\n\n testutils.undeploy()\n\n return 0",
"def test_update_wait():\n wait = '10 seconds'\n config_info = read_config()\n config_info['wait'] = wait\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['wait'] == wait",
"def test_set_agent_incorrect_value(self):\n with pytest.raises(\n ClickException,\n match=\"Attribute `not_agent_name` is not allowed to be updated!\",\n ):\n self.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"config\", \"set\", \"agent.not_agent_name\", \"new_name\"],\n standalone_mode=False,\n catch_exceptions=False,\n )",
"def test_config(self):\n\n # We start in uninitialized state.\n # In this state there is no driver process.\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)\n \n # Ping the agent.\n retval = self._ia_client.ping_agent()\n log.info(retval)\n\n # Initialize the agent.\n # The agent is spawned with a driver config, but you can pass one in\n # optinally with the initialize command. This validates the driver\n # config, launches a driver process and connects to it via messaging.\n # If successful, we switch to the inactive state.\n cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.INACTIVE)\n\n # Ping the driver proc.\n retval = self._ia_client.ping_resource()\n log.info(retval)\n\n decoder = IonObjectDeserializer(obj_registry=get_obj_registry())\n\n # Grab the alarms defined in the config.\n retval = decoder.deserialize(self._ia_client.get_agent(['alarms'])['alarms'])\n\n \"\"\"\n {'status': None, 'stream_name': 'parsed', 'name': 'test_sim_warning',\n 'upper_bound': 5.0, 'expr': 'x<5.0', 'upper_rel_op': '<',\n 'lower_rel_op': None, 'type_': 'IntervalAlarmDef', 'value_id': 'temp',\n 'lower_bound': None, 'message': 'Temperature is above test range of 5.0.',\n 'current_val': None, 'type': 1}\n \"\"\"\n self.assertEqual(retval[0].type_, 'IntervalAlarmDef')\n self.assertEqual(retval[0].upper_bound, 5.0)\n self.assertEqual(retval[0].expr, 'x<5.0')\n \n # Reset the agent. This causes the driver messaging to be stopped,\n # the driver process to end and switches us back to uninitialized.\n cmd = AgentCommand(command=ResourceAgentEvent.RESET)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)"
] | [
"0.6883115",
"0.6502125",
"0.64594984",
"0.6202278",
"0.6129778",
"0.61204875",
"0.6075345",
"0.6033572",
"0.5983752",
"0.5950724",
"0.5938686",
"0.59221786",
"0.5911883",
"0.59022325",
"0.5892399",
"0.58266765",
"0.57814544",
"0.57743114",
"0.5774052",
"0.5764389",
"0.5747986",
"0.5745692",
"0.5728632",
"0.57181954",
"0.5712753",
"0.56938446",
"0.56841576",
"0.5665145",
"0.5653854",
"0.5637229"
] | 0.6550141 | 1 |
Test agent config manager get_overridables. | def test_AgentConfigManager_get_overridables():
path = Path(CUR_PATH, "data", "dummy_aea")
agent_config = AEABuilder.try_to_load_agent_configuration_file(path)
config_manager = AgentConfigManager(agent_config, path)
agent_overridables, component_overridables = config_manager.get_overridables()
assert "description" in agent_overridables
assert "is_abstract" in list(component_overridables.values())[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ini_get_all():\n raise NotImplementedError()",
"def antenny_list_configs(self):\n return self.antenny_config.list_configs()",
"def getConfigAll(self):\n return self.configAll(False)",
"def getConfigs(self, host):\n raise \"not implemented\"",
"def test_get_list(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"vendor.fetchai.connections.p2p_libp2p.config.entry_peers\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"[]\\n\"",
"def test_list_config_nodes(self):\n with self.override_role():\n self.config_client.list_config_nodes()",
"def target_interfaces(self):",
"def target_interfaces(self):",
"def test_find_agent_ips(self):\n\n with patch(\n \"salt.cloud.clouds.proxmox.query\",\n return_value={\n \"result\": [\n {\n \"name\": \"eth0\",\n \"ip-addresses\": [\n {\"ip-address\": \"1.2.3.4\", \"ip-address-type\": \"ipv4\"},\n {\"ip-address\": \"2001::1:2\", \"ip-address-type\": \"ipv6\"},\n ],\n },\n {\n \"name\": \"eth1\",\n \"ip-addresses\": [\n {\"ip-address\": \"2.3.4.5\", \"ip-address-type\": \"ipv4\"},\n ],\n },\n {\n \"name\": \"dummy\",\n },\n ]\n },\n ) as mock_query:\n vm_ = {\n \"technology\": \"qemu\",\n \"host\": \"myhost\",\n \"driver\": \"proxmox\",\n \"ignore_cidr\": \"1.0.0.0/8\",\n }\n\n # CASE 1: Test ipv4 and ignore_cidr\n result = proxmox._find_agent_ip(vm_, ANY)\n mock_query.assert_any_call(\n \"get\", \"nodes/myhost/qemu/{}/agent/network-get-interfaces\".format(ANY)\n )\n\n assert result == \"2.3.4.5\"\n\n # CASE 2: Test ipv6\n\n vm_[\"protocol\"] = \"ipv6\"\n result = proxmox._find_agent_ip(vm_, ANY)\n mock_query.assert_any_call(\n \"get\", \"nodes/myhost/qemu/{}/agent/network-get-interfaces\".format(ANY)\n )\n\n assert result == \"2001::1:2\"",
"def net_list_on_dhcp_agent(mgr_or_client, *args, **kwargs):\n return net_list(mgr_or_client, *args, **kwargs)",
"def test_list_global_system_configs(self):\n with self.override_role():\n self.config_client.list_global_system_configs()",
"def test_getorgs(self):\n pass",
"def _get_interfaces(self):\n return self.__interfaces",
"def _get_interfaces(self):\n return self.__interfaces",
"def _get_interfaces(self):\n return self.__interfaces",
"def test_dont_merge_if_multiple_client(self):\r\n raise SkipTest(\"Not implemented\")",
"def test_agent_config_updated(self):\n loader = ConfigLoader.from_configuration_type(PackageType.AGENT)\n with Path(self._get_cwd(), DEFAULT_AEA_CONFIG_FILE).open() as fp:\n agent_config = loader.load(fp)\n assert DefaultMessage.protocol_id in agent_config.protocols\n assert ERROR_SKILL_PUBLIC_ID in agent_config.skills",
"def test_client_addresses_list(self):\n pass",
"def get_agent_network_interfaces(self):\n iface_list = [iface.serialize()['name'] for iface in\n hardware.dispatch_to_managers('list_network_interfaces')]\n iface_list = [name for name in iface_list if 'lo' not in name]\n\n if len(iface_list) == 0:\n raise errors.LookupAgentInterfaceError('Agent could not find a '\n 'valid network interface.')\n else:\n return iface_list",
"def test_get_hyperflex_config_result_list(self):\n pass",
"def create_mock_api_discovery(aioclient_mock, bridges):\n aioclient_mock.get(\n URL_NUPNP,\n json=[{\"internalipaddress\": host, \"id\": id} for (host, id) in bridges],\n )\n for host, bridge_id in bridges:\n aioclient_mock.get(\n f\"http://{host}/api/config\",\n json={\"bridgeid\": bridge_id},\n )\n # mock v2 support if v2 found in id\n aioclient_mock.get(\n f\"https://{host}/clip/v2/resources\",\n status=403 if \"v2\" in bridge_id else 404,\n )",
"def test_list_agents(self):\n admin_resource_id = self.agent['id']\n with (self.override_role_and_validate_list(\n admin_resource_id=admin_resource_id)) as ctx:\n ctx.resources = self.agents_client.list_agents(\n id=admin_resource_id)[\"agents\"]",
"def _get_oslo_configs():\n # NOTE(flaper87): Oslo config should be\n # optional. Instead of doing try / except\n # at the top of this file, lets import cfg\n # here and assume that the caller of this\n # function already took care of this dependency.\n from oslo.config import cfg\n\n return [\n cfg.StrOpt('cache_url', default='memory://',\n help='URL to connect to the cache back end.')\n ]",
"def test_list_build_config_for_all_namespaces(self):\n pass",
"def available_auto_connection():\n path = os.path.dirname(verticapy.__file__) + \"/connections.verticapy\"\n confparser = ConfigParser()\n confparser.optionxform = str\n try:\n confparser.read(path)\n confparser.remove_section(\"VERTICAPY_AUTO_CONNECTION\")\n except:\n pass\n all_connections = confparser.sections()\n return all_connections",
"def overrides(self) -> ConfigNodePropertyArray:\n return self._overrides",
"def _config_table(self):\n return self.targets",
"def test_server_override_general(self):\n # Sanity check our override values do not overlap\n self.assertNotEqual(CONFIG_DATA[\"ConcurrentWorkers\"],\n CONFIG_DATA[\"OverrideConcurrentWorkers\"])\n self.assertNotEqual(CONFIG_DATA[\"SaveTimeoutMinutes\"],\n CONFIG_DATA[\"OverrideSaveTimeoutMinutes\"])\n self.assertNotEqual(CONFIG_DATA[\"RetainImageMinutes\"],\n CONFIG_DATA[\"OverrideRetainImageMinutes\"])\n self.assertNotEqual(CONFIG_DATA[\"Region\"],\n CONFIG_DATA[\"OverrideRegion\"])\n config_data = imageroller.main.read_config(\n self._cmd_args,\n imageroller.test.get_config_parser(\n self._server_valid_override))\n # Verify default disabled server is not included\n self.assertNotIn(\n CONFIG_DATA[\"OverrideNotExistFQDN\"],\n [server_data.name for server_data in\n config_data.server_data])\n # Sanity check we have every server's config we expect to have\n self.assertSetEqual(\n set([server_data.name for server_data in\n config_data.server_data]),\n {CONFIG_DATA[\"OverrideWorkersFQDN\"],\n CONFIG_DATA[\"OverrideSaveTimeoutFQDN\"],\n CONFIG_DATA[\"OverrideRetainImageFQDN\"],\n CONFIG_DATA[\"OverrideRegionFQDN\"]},\n )\n # Smoke test they are all enabled\n self.assertTrue(all([server_data.enabled\n for server_data in\n config_data.server_data]))",
"def server_agent_list(ctx, output_format, columns):\n data = ctx.obj.get_agents()\n\n for agent in data['agent']:\n agent_info = ctx.obj.get_agent_by_agent_id(agent['id'])\n agent['ip'] = agent_info['ip']\n agent['pool'] = agent_info['pool']['name']\n agent['build_type'] = ctx.obj.get_agent_build_type(agent['id'])\n agent['build_text'] = ctx.obj.get_agent_build_text(agent['id'])\n\n if output_format == 'table':\n column_names = columns.split(',')\n output_table(column_names, data['agent'])\n elif output_format == 'json':\n output_json_data(data)",
"def testGetHostConfigs_all(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n hosts = pool.GetHostConfigs()\n self.assertEqual(6, len(hosts))"
] | [
"0.5522886",
"0.533993",
"0.5265402",
"0.52284235",
"0.5223923",
"0.5155312",
"0.5122521",
"0.5122521",
"0.5117879",
"0.50871843",
"0.5086171",
"0.505273",
"0.5049074",
"0.5049074",
"0.5049074",
"0.49887353",
"0.49772704",
"0.497659",
"0.49645105",
"0.494614",
"0.49298245",
"0.49221227",
"0.49180844",
"0.4916",
"0.49030092",
"0.48923257",
"0.48863012",
"0.48777267",
"0.4864638",
"0.4863723"
] | 0.8485979 | 0 |
Iterate from the prior to the posterior distribution using the input data. | def iterate(self, data):
# Append data to self.data
self.data = np.append(self.data, data)
for i, d in enumerate(data):
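            # Bayes update for observation d: multiply the current distribution by its likelihood, then renormalize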
update = self.current*self.likelihood(d)
self.current = self._normalize(update)
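            # record the updated distribution; the posterior history grows by one row per observation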
self.posterior = np.concatenate((self.posterior,[self.current]))
print(str(len(data)) + " iterations completed!")
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _process(self, data: np.ndarray) -> np.ndarray:\n probabilities = np.empty(data.size, dtype=object)\n\n for idx, counts_dict in enumerate(data):\n shots = sum(counts_dict.values())\n freq = counts_dict.get(self._outcome, 0)\n alpha_posterior = [freq + self._alpha_prior[0], shots - freq + self._alpha_prior[1]]\n alpha_sum = sum(alpha_posterior)\n\n p_mean = alpha_posterior[0] / alpha_sum\n p_var = p_mean * (1 - p_mean) / (alpha_sum + 1)\n\n probabilities[idx] = ufloat(nominal_value=p_mean, std_dev=np.sqrt(p_var))\n\n return probabilities",
"def posterior_sample(self):\n pass",
"def sample_from_prior(self, *args, **kwargs):\n pass",
"def posterior_distr(self, y, **args):\n raise NotImplementedError",
"def computePosterior(self):\n # in their log form, posterior = prior + beta * datalikelihood\n # make a copy of prior at first\n self.posterior.copy(self.prior)\n # add the data likelihood\n altar.blas.daxpy(self.beta, self.data, self.posterior)\n # all done\n return self",
"def posterior(self, samples):\n unique_samps = set(samples)\n denominator = 0\n posteriors = []\n n_samps = len(samples)\n for concept in self.concepts:\n num = 0\n if unique_samps.issubset(set(concept.extension)):\n num = concept.prior*concept.likelihood(n_samps)\n denominator += num\n posteriors.append(num)\n return np.divide(posteriors, denominator)",
"def posterior_distribution(x, t, M, noise_precision, prior_mu, prior_precision):\n A = np.array([x ** i for i in range(M)]).reshape((1, M)) # (M, 1)\n\n new_precision = prior_precision + noise_precision * np.dot(np.transpose(A), A)\n new_mu = np.dot(np.linalg.inv(new_precision), noise_precision * t * np.transpose(A) + np.dot(prior_precision, prior_mu))\n\n return new_mu, new_precision",
"def post_predictive_distribution(self, samples):\n post_pred_dist = []\n posteriors = self.posterior(samples)\n for point in range(1, self.max_val+1):\n post_pred = 0\n for concept, posterior in list(zip(self.concepts, posteriors)):\n if point in concept.extension:\n post_pred += posterior\n post_pred_dist.append(post_pred)\n return post_pred_dist",
"def iterate_pagerank(corpus, damping_factor):\n distribution = dict()\n corpus_length = len(corpus)\n for u in corpus: #On first iteration, each page is equally likely.\n distribution[u] = 1.0 / corpus_length\n\n difference = 1.0\n max_difference = 0.0\n while ( difference > 0.001 ):\n old_distribution = distribution.copy()\n for u in corpus: #Page we are currently looking at\n prob = (1.0 - damping_factor) / corpus_length\n for x in corpus:\n if u == x:\n continue\n if u in corpus[x]:\n links = list(corpus[x])\n prob += damping_factor * (distribution[x] / len(links))\n distribution[u] = prob\n difference = abs(distribution[u] - old_distribution[u])\n if difference > max_difference: max_difference = difference\n return distribution",
"def _preprocess(self, data):\n\n # pipeline: first call the previous statistics:\n if self.previous_statistics is not None:\n data = self.previous_statistics.statistics(data)\n # the first of the statistics need to take list as input, in order to match the API. Then actually the\n # transformations work on np.arrays. In fact the first statistic transforms the list to array. Therefore, the\n # following code needs to be called only if the self statistic is the first, i.e. it does not have a\n # previous_statistic element.\n else:\n data = self._check_and_transform_input(data)\n\n return data",
"def sample_from_prior(self):\n raise NotImplementedError",
"def _updateInitialProbabilities(self): \n N = self.N\n K = self.K\n\n for i in range(1,self.K+1):\n s = 0\n updated_prob = 0\n for n in range(1,self.N+1):\n s = s+1\n updated_prob = updated_prob + self.posterior_state_trellis[n][(1,i)]\n self.state_initial_prob[i] = (updated_prob/s)",
"def posterior(mu, x, sigma):\n post = like(x, sigma, mu) * prior(mu)\n evidencia = np.trapz(post, mu)\n return post/evidencia",
"def set_prior(self,field):\n self.observation_thresholds = [i/self.observations for i in range(0,self.observations)]\n self.observation_samples = 1\n # TODO: For use after integrating image processing with MCESP for Game-Delayed Reinforcements\n # self.norm = field.max()",
"def iterate_pagerank(corpus, damping_factor):\n # List all pages in corpus\n pages = list(corpus.keys())\n # {p: i}\n links = dict()\n\n # Fix corpus\n for p in corpus.keys():\n # If no links, then it has one link for every page in corpus\n if corpus[p] == set():\n corpus[p] = set(pages)\n \n for page in pages:\n links[page] = []\n for p in corpus.keys():\n if page in corpus[p]:\n links[page].append(p)\n #print(corpus)\n #print(links)\n\n probabilities = dict()\n updated_probabilities = dict()\n\n # Initial PR = 1/N\n for p in corpus.keys():\n probabilities[p] = 1 / len(corpus.keys())\n updated_probabilities[p] = float(0)\n\n # PR differences\n d = {k: abs(probabilities[k] - updated_probabilities[k]) for k in probabilities if k in updated_probabilities}\n\n # Recalculate\n i = 0\n p_corpus = (1 - damping_factor) / len(corpus)\n while max(d.values()) > 0.001:\n for p in corpus.keys():\n p_link = 0\n # Links\n for lp in links[p]:\n if (i % 2) == 0:\n p_link += (probabilities[lp] / len(corpus[lp]))\n else:\n p_link += (updated_probabilities[lp] / len(corpus[lp]))\n pr = p_corpus + (damping_factor * p_link)\n\n # Update probabilities or updated_probabilities dictionary\n if (i % 2) == 0:\n updated_probabilities[p] = pr\n else:\n probabilities[p] = pr\n \n # Increase count\n i += 1\n\n # Update differences dictionary\n d = {k: abs(probabilities[k] - updated_probabilities[k]) for k in probabilities if k in updated_probabilities}\n #print(\"P\", \"\\033[93m {}\\033[00m\" .format(probabilities))\n #print(\"UP\", \"\\033[96m {}\\033[00m\" .format(updated_probabilities))\n #print(\"D\", \"\\033[91m {}\\033[00m\" .format(d))\n\n # When PR's do not change by > 0.001\n return probabilities",
"def gaussian_prior(self):\n self.prior = sps.multivariate_normal(self.m0,self.S0)",
"def _iter_distributions(self) -> Iterator[\"BaseDistribution\"]:\n raise NotImplementedError()",
"def prob_given(self, posterior, prior):\n\t # print \"posterior, prior\", posterior, prior\n\t return self.prob(merge(prior, posterior)) / self.prob(prior) if self.prob(prior) else 0",
"def compute_posterior(prior, likelihood, y):\n\n # -------------------------------------------------------------------------\n # ERROR CHECKS -- DO NOT MODIFY\n #\n\n # check that prior probabilities sum to 1\n if np.abs(1 - np.sum(prior)) > 1e-06:\n exit('In compute_posterior: The prior probabilities need to sum to 1')\n\n # check that likelihood is specified as a 2D array\n if len(likelihood.shape) != 2:\n exit('In compute_posterior: The likelihood needs to be specified as ' +\n 'a 2D array')\n\n K, M = likelihood.shape\n\n # make sure likelihood and prior agree on number of hidden states\n if len(prior) != M:\n exit('In compute_posterior: Mismatch in number of hidden states ' +\n 'according to the prior and the likelihood.')\n\n # make sure the conditional distribution given each hidden state value sums\n # to 1\n for m in range(M):\n if np.abs(1 - np.sum(likelihood[:, m])) > 1e-06:\n exit('In compute_posterior: P(Y | X = %d) does not sum to 1' % m)\n\n #\n # END OF ERROR CHECKS\n # -------------------------------------------------------------------------\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE FOR PART (b)\n #\n # Place your code to compute the log of the posterior here: store it in a\n # NumPy array called `log_answer`. If you exponentiate really small\n # numbers, the result is likely to underflow (i.e., it will be so small\n # that the computer will just make it 0 rather than storing the right\n # value). You need to go to log-domain. Hint: this next line is a good\n # first step.\n log_prior = np.log(prior)\n# print(log_prior)\n# print(likelihood)\n# print(y)\n unnormal = log_prior + np.log(likelihood[y,:]).sum(axis=0)\n# print(unnormal)\n log_answer = unnormal - scipy.misc.logsumexp(unnormal)\n# print(log_answer)\n\n #\n # END OF YOUR CODE FOR PART (b)\n # -------------------------------------------------------------------------\n\n # do not exponentiate before this step\n posterior = np.exp(log_answer)\n return posterior",
"def bd_process_model_probability(data,\n fitness_prior=flat_fitness_prior,\n N_w_prior=flat_N_w_prior,\n mutation_object=True):\n\n if mutation_object is True:\n trajectories = data.data\n else:\n trajectories = data\n\n ind_likelihood = []\n for traj in trajectories:\n int_s = []\n for s in fitness_prior[0, :]:\n int_N_w = []\n for N_w in N_w_prior[0, :]:\n int_N_w.append(\n bd_process_conditional_likelihood_s_N(traj,\n s=s, N_w=N_w)\n )\n int_s.append(np.trapz(x=N_w_prior[0, :],\n y=int_N_w*N_w_prior[1, :]))\n\n marginalised_likelihood = np.trapz(x=fitness_prior[0, :],\n y=int_s*fitness_prior[1, :])\n ind_likelihood.append(marginalised_likelihood)\n \n mutation_prob = np.product(ind_likelihood)\n\n if mutation_object is True:\n # return updated model_comparison object \n data.bd_prob = mutation_prob\n return data\n else:\n # return marginalised likelihood.\n return mutation_prob",
"def prep(self):\n \n # create a dict with prior probabilities\n self.row_priors = [0.0]*len(self.rows)\n self.feature_priors = dict()\n \n # denominator is given by reference priors\n denominator = sum(self.column_priors)\n # null_feature_prior is used when feature is not observed at all\n # this is set up to scale with features, i.e. arbitrarily adding\n # child features into an ontology should not skew sums over repr.\n null_feature_prior = 1/max(denominator, float(len(self.rows)))\n \n for rowname, rowindex in self.rows.items(): \n numerator = 0\n for colname, colindex in self.columns.items(): \n colprior = self.column_priors[colindex]\n numerator += self.data[colindex][rowindex]*colprior\n if numerator == 0:\n numerator = null_feature_prior \n self.row_priors[rowindex] = float(numerator)/denominator\n self.feature_priors[rowname] = self.row_priors[rowindex]\n\n return self",
"def calc_posterior(likelihood, prior, norm_list):\n Pa = 0\n \n for t in norm_list:\n x = t[0] * t[1]\n Pa+=x\n\n return (likelihood*prior)/Pa",
"def process_custom_prior(prior) -> Tuple[Distribution, int, bool]:\n\n check_prior_methods(prior)\n\n check_prior_batch_behavior(prior)\n\n prior, is_prior_numpy = maybe_wrap_prior_to_pytorch(prior)\n\n parameter_dim = prior.sample().numel()\n\n return prior, parameter_dim, is_prior_numpy",
"def test_3_prior(self):\n print(\"test 3: prior probabilities\")\n\n for i, x in enumerate(self.X):\n print(i+1, prior_probability(\n x, self.means, self.dispersions, self.cluster_probabilities\n ), sep=' : ')",
"def update_posterior_probs(vars_):\n vars_.weighted_sums += np.power(vars_.dprime_map[vars_.focus],2) * vars_.visual_field\n vars_.post_probs = np.exp(vars_.weighted_sums) * vars_.prior_prob\n vars_.post_probs /= np.sum(vars_.post_probs)",
"def sample_gil_from_data(self, X_d, loop_iters=5):\n data_samples = []\n prior_samples = []\n X_c = 0.0 * X_d\n X_m = 0.0 * X_d\n for i in range(loop_iters):\n # record the data samples for this iteration\n data_samples.append(1.0 * X_d)\n # sample from their inferred posteriors\n X_p = self.IN.sample_posterior(X_d, X_c, X_m)\n # record the sampled points (in the \"prior space\")\n prior_samples.append(1.0 * X_p)\n # get next data samples by transforming the prior-space points\n X_d = self.GN.transform_prior(X_p)\n result = {\"data samples\": data_samples, \"prior samples\": prior_samples}\n return result",
"def before_each(self, dataset: pydicom.dataset.Dataset) -> None:",
"def prior_sample(self):\n pass",
"def calc_prob_prior(iterations, lam):\n return list(map(lambda x: math.exp(-lam * x), range(iterations)))",
"def p_prior(self):\n sampler = self.__sampler\n nwalkers = self.nwalkers\n pRanges = self.pRanges\n if sampler == \"EnsembleSampler\":\n p = [posRange(pRanges) for i in range(nwalkers)]\n elif sampler == \"PTSampler\":\n ntemps = self.ntemps\n p = np.zeros((ntemps, nwalkers, self.ndim))\n for loop_t in range(ntemps):\n for loop_w in range(nwalkers):\n p[loop_t, loop_w, :] = posRange(pRanges)\n return p"
] | [
"0.6235733",
"0.6220724",
"0.59709436",
"0.5916188",
"0.5812615",
"0.5801119",
"0.57054955",
"0.5677082",
"0.56668025",
"0.5654472",
"0.5648874",
"0.56254435",
"0.55976546",
"0.5591801",
"0.5586015",
"0.55826384",
"0.5580829",
"0.55756706",
"0.5572443",
"0.55515474",
"0.55488694",
"0.55463785",
"0.5525445",
"0.55224013",
"0.55179685",
"0.5514963",
"0.55098355",
"0.54941994",
"0.54751915",
"0.54649514"
] | 0.69863427 | 0 |
Calculates the credible interval for any probability distribution, given an input interval for the cdf. | def credible_interval(self, distType='current', interval=(0.025, 0.975)):
# Calculate cdf to use for credible interval
distCred = self.cumulative_distribution(dist=distType)
# Prior and Current credible intervals
if (distType=='current' or distType=='prior'):
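        # np.where(...).min() picks the first hypothesis at which the cdf
        # crosses each quantile bound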
minCred = self.hypotheses[np.where((distCred-interval[0])>0)[0].min()]
maxCred = self.hypotheses[np.where((distCred-interval[1])>0)[0].min()]
ci = [(minCred, maxCred)]
# Posterior: all iterations credible intervals
else:
ci = []
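        # Posterior cdf is 2D here: one cumulative distribution per update iteration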
        for row in distCred:
            minCred = self.hypotheses[np.where((row-interval[0])>0)[0].min()]
            maxCred = self.hypotheses[np.where((row-interval[1])>0)[0].min()]
            ci.append((minCred, maxCred))
return ci | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_credible_interval(vals, weights, confidence: float = 0.95):\n if confidence <= 0.0 or confidence >= 1.0:\n raise ValueError(\n f\"Confidence {confidence} must be in the interval (0.0, 1.0).\"\n )\n alpha_lb = 0.5 * (1.0 - confidence)\n alpha_ub = confidence + alpha_lb\n lb = compute_quantile(vals, weights, alpha_lb)\n ub = compute_quantile(vals, weights, alpha_ub)\n return lb, ub",
"def credible_interval(self, parameter, interval=[0.05, 0.95]):\n\n if parameter not in self.parameters:\n raise ValueError(f\"Parameter '{parameter}' is not available\")\n\n intervals = {}\n for key, value in self.results.items():\n if isinstance(value, Grid):\n intervals[key] = Plot._credible_interval_grid(\n value, parameter, interval\n )\n else:\n credint = value.posterior[parameter].quantile(interval).to_list()\n intervals[key] = credint[0] if len(interval) == 1 else credint\n\n return list(intervals.values())[0] if len(self.results) == 1 else intervals",
"def range_probability_cdf(mean, devi, range_low, range_high):\r\n # 1 / (2 * pi * deviation**2) = x\r\n # e ** -((range_num - mean)**2 / 2*deviation**2 = y\r\n # area = y/x\r\n\r\n large = norm.cdf(range_high, mean, devi)\r\n print(\"scipy large area = \", large)\r\n small = norm.cdf(range_low, mean, devi)\r\n print(\"scipy small area = \", small)\r\n range_area = large - small\r\n message = f\"The area in range {range_low} - {range_high} is {range_area}\"\r\n return range_area",
"def get_confidence_interval(self,a,b):\n\t\tk_vals,prob_vals = self.tuple_of_probabilities\n\t\tworking_indices = [i for i,v in enumerate(k_vals) if (v >= a and v<= b)]\n\t\tworking_prob_vals = [prob_vals[i] for i in working_indices]\n\t\treturn sum(working_prob_vals)",
"def test_conf_interval_ecdf_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n\n # ``quantile_estimation_method = \"ecdf\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"ecdf\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n pred_df[ERR_STD_COL] = round(pred_df[ERR_STD_COL], 2)\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.32, 289.38, 291.3, 291.34), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.63, -5.56, -4.13, -4.08), (\n \"quantiles are incorrect\")\n expected_stds = [0.29, 0.42, 0.42, 0.42, 0.42, 0.58, 0.58, 0.58, 0.58, 0.58,\n 0.58, 0.42]\n assert list(pred_df[ERR_STD_COL].values) == expected_stds",
"def get_interval_from_confidence_file(self, interval_dict):\n for arc in self.arc_info.keys():\n weight = self.arc_info[arc][\"weight\"]\n if weight == 0:\n interval = [0, 0]\n else:\n interval = interval_dict[weight]\n ub = interval[1]\n lb = interval[0]\n self.arc_info[arc][\"upper_bound\"] = ub\n self.arc_info[arc][\"lower_bound\"] = lb",
"def mycdf(mean, devi, range_low, range_high):\r\n\r\n devi_square = float(devi**2)\r\n low_e_num = math.exp(-((float(range_low) - float(mean))**2 / (2*devi_square)))\r\n denom = float( math.sqrt(2 * math.pi * devi_square) )\r\n high_e_num = math.exp(-((float(range_high) - float(mean))**2 / (2*devi_square)))\r\n low_area = float(low_e_num / denom)\r\n high_area = float(high_e_num / denom)\r\n if range_low > mean:\r\n low_area = 1 - low_area\r\n if range_high > mean:\r\n high_area = 1 - high_area\r\n print(\"my high_area = \", high_area)\r\n print(\"my low_area = \", low_area)\r\n under_curve = high_area - low_area\r\n message = f\"The area under the curve for range {range_low} - {range_high} = {under_curve}\"\r\n return under_curve",
"def _compute_register_bounds(cls, num_values, probability):\n bits = np.arange(1, num_values + 1)\n probs = scipy.stats.geom.cdf(bits, probability)\n return probs / probs[-1]",
"def cdf(self, alpha): #Plot empirical cfd with confidence interval\n x = self.x\n n = len(x)\n y = np.arange(1, n+1)/n\n \n #Computing confidence interval with the Dvoretzky–Kiefer–Wolfowitz method based on the empirical points\n F1 = []\n F2 = []\n for i in range(0, n):\n e = (((mt.log(2/alpha))/(2*n))**0.5) \n F1.append(y[i] - e)\n F2.append(y[i] + e) \n plt.plot(sorted(x), y, label='Empirical CDF')\n plt.plot(sorted(x), F1, linestyle='--', color='red', alpha = 0.8, lw = 0.9, label = 'Dvoretzky–Kiefer–Wolfowitz Confidence Bands')\n plt.plot(sorted(x), F2, linestyle='--', color='red', alpha = 0.8, lw = 0.9)\n plt.ylabel('Cumulative Distribution Function')\n plt.xlabel('Observed Data')\n plt.legend()\n plt.show()\n \n return(y)",
"def compute_interval_limits(bias, acceleration, n_boots, ci=95):\n from scipy.stats import norm\n from numpy import isnan, nan\n\n alpha = _compute_alpha_from_ci(ci)\n\n alpha_low = alpha / 2\n alpha_high = 1 - (alpha / 2)\n\n z_low = norm.ppf(alpha_low)\n z_high = norm.ppf(alpha_high)\n\n kws = {'bias': bias, 'acceleration': acceleration}\n low = _compute_quantile(z_low, **kws)\n high = _compute_quantile(z_high, **kws)\n\n if isnan(low) or isnan(high):\n return low, high\n\n else:\n low = int(norm.cdf(low) * n_boots)\n high = int(norm.cdf(high) * n_boots)\n return low, high",
"def _credible_interval_grid(grid, parameter, interval):\n\n from pesummary.utils.array import Array\n\n margpost = grid.marginalize_posterior(not_parameters=parameter)\n intervals = Array.percentile(\n grid.sample_points[parameter],\n weights=margpost,\n percentile=[100 * val for val in interval],\n )\n\n return intervals if len(interval) > 1 else intervals[0]",
"def cchalf(dataframe, function, bins):\n dist = dataframe.set_index(['H', 'K', 'L'])['D'].drop_duplicates()\n dmin = dist.min()\n dmax = dist.max()\n binedges = np.linspace(dmin**-2, dmax**-2, bins+1)**-0.5\n binedges = list(zip(binedges[:-1], binedges[1:]))\n a,b = split(dataframe)\n xval_a, xval_b = function(a), function(b)\n#TODO: Fix this awful hack\n key = [i for i in xval_a if i!='D'][0]\n xval_a, xval_b = xval_a.join(dist),xval_b.join(dist)\n idx = xval_a.index.intersection(xval_b.index)\n xval_a,xval_b = xval_a.loc[idx],xval_b.loc[idx]\n cchalf = []\n for dmin,dmax in binedges:\n idx = (xval_a['D'] > dmin) & (xval_a['D'] < dmax)\n a = np.array(xval_a[idx][key]).flatten()\n b = np.array(xval_b[idx][key]).flatten()\n cchalf.append(np.corrcoef(a,b)[0, 1])\n return cchalf, binedges",
"def calcBRange(c,n=10):\n \n bMin = -abs(c)/2.0 \n bMax = abs(c)/2.0 \n return np.linspace(bMin,bMax,n)",
"def cdfFunction(f, x, N):\r\n return ssstats.binom.cdf(x, N, f)",
"def CI(x, alpha=0.05):\n x = np.asarray(x)\n xs = x.size\n s = np.argsort(x)\n c = int((alpha / 2) * xs)\n return Interval(x[s[c-1]], x[s[xs-c]])",
"def _confidence_interval_function(xq, cinfo):\n a = cinfo.a.copy()\n a[cinfo.indx] = xq\n\n yfit, _ = cinfo.fit_function(a, pderflg=False)\n if yfit.dtype in ['complex64','complex128']:\n yfit = np.concatenate([yfit.real,yfit.imag])\n wchisqr1 = np.sum(cinfo.ww*(yfit-cinfo.dat)**2)/cinfo.nfree\n \n goal = abs(wchisqr1-cinfo.wchi*cinfo.factor)\n \n return goal",
"def confidence_interval(self):\r\n coh_var = np.zeros((self.input.data.shape[0],\r\n self.input.data.shape[0],\r\n self._L), 'd')\r\n for i in range(self.input.data.shape[0]):\r\n for j in range(i):\r\n if i != j:\r\n coh_var[i, j] = tsu.jackknifed_coh_variance(\r\n self.spectra[i],\r\n self.spectra[j],\r\n self.eigs,\r\n adaptive=self._adaptive\r\n )\r\n\r\n idx = triu_indices(self.input.data.shape[0], 1)\r\n coh_var[idx[0], idx[1], ...] = coh_var[idx[1], idx[0], ...].conj()\r\n\r\n coh_mat_xform = tsu.normalize_coherence(self.coherence,\r\n 2 * self.df - 2)\r\n\r\n lb = coh_mat_xform + dist.t.ppf(self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n ub = coh_mat_xform + dist.t.ppf(1 - self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n\r\n # convert this measure with the normalizing function\r\n tsu.normal_coherence_to_unit(lb, 2 * self.df - 2, lb)\r\n tsu.normal_coherence_to_unit(ub, 2 * self.df - 2, ub)\r\n\r\n return ub - lb",
"def ci_diff_prop(p1, p2, n1, n2, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n prop_diff = p1 - p2\n # find the z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n margin_of_error = z_star * (np.sqrt((p1 * (1 - p1) / n1) + (p2 * (1 - p2) / n2)))\n # calculate the lower and upper bound\n lcb = prop_diff - margin_of_error\n ucb = prop_diff + margin_of_error\n print(\n \"{}% Confidence Interval for difference in two Population proportions: ({},{})\".format(\n conf_level, lcb, ucb\n )\n )",
"def rate_density(self, value):\n\n # TODO: analyse for certain that log units cancel out\n # with the change in occr\n\n if value.ndim == 2:\n value = value.T\n\n R_i = np.digitize(value[0], self._R_boundaries) - 1\n P_i = np.digitize(value[1], self._P_boundaries) - 1\n\n # Remove the ones out of bounds (oob_mask = out of bounds mask)\n oob_mask = np.zeros_like(R_i, dtype=bool)\n oob_mask = oob_mask | ((R_i < 0) | (R_i >= np.shape(self.occr)[0]))\n oob_mask = oob_mask | ((P_i < 0) | (P_i >= len(self._P_boundaries)-1))\n\n R_i = R_i[~oob_mask]\n P_i = P_i[~oob_mask]\n\n return self.occr[R_i] * self._cpf_grid[R_i, P_i]",
"def _ci(arr, ci=0.95, method=\"bootstrap\", n_bootstraps=2000, random_state=None):\n if method == \"bootstrap\":\n return bootstrap_confidence_interval(\n arr, ci=ci, n_bootstraps=n_bootstraps, random_state=random_state\n )\n else:\n from .parametric import _parametric_ci\n\n return _parametric_ci(arr, ci=ci)",
"def rvsWithinCDFbounds(self,lowerBound,upperBound):\n randResult = self._distribution.inverseCdf(float(random(1))*(upperBound-lowerBound)+lowerBound)\n return randResult",
"def cdf(self,x):\n if hasattr(x,'__len__'):\n returnCdf = np.array([self.cdf(i) for i in x])\n else:\n returnCdf = self._distribution.cdf(x)\n return returnCdf",
"def get_confidence_interval(\n num_people,\n num_iter=1000000,\n percentile=2.576,\n num_days=365,\n):\n mean = 0.0\n variance = 0.0 # not exactly\n for i in range(1, num_iter + 1):\n x = [randint(1, num_days) for person in range(num_people)]\n x.sort()\n is_consecutive = any(p + 1 == q for (p, q) in zip(x[:-1], x[1:], strict=True))\n is_a_loop = x[0] + num_days - 1 == x[-1]\n is_positive = int(is_consecutive or is_a_loop)\n delta = is_positive - mean\n mean += delta / float(i)\n variance += delta * (is_positive - mean)\n sd = sqrt(variance / float(num_iter - 1))\n lower_bound = mean - percentile * sd / sqrt(num_iter)\n upper_bound = mean + percentile * sd / sqrt(num_iter)\n print(\n \"Number of people: {}\\tLower bound: {:2.5%}\\tUpper bound: {:2.5%}\".format(\n num_people,\n lower_bound,\n upper_bound,\n ),\n )\n return lower_bound, upper_bound",
"def chebint(self, a, b, c, n):\n sum = 0.0\n fac = 1.0\n con = 0.25 * (b - a) # factor that normalizes the interval\n cint = numpy.zeros(n)\n for j in range(1, n - 2):\n cint[j] = con * (c[j - 1] - c[j + 1]) / j\n sum = sum + fac * cint[j]\n fac = - fac\n cint[n - 1] = con * c[n - 2] / (n - 1)\n sum = sum + fac * cint[n - 1]\n cint[0] = 2.0 * sum # set constant of integration.\n return (cint)",
"def test_conf_interval_normal_method_with_bounds(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n # with enforced lower limit (``min_admissible_value``)\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=290.0,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (290.0, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (290.0, 290.0, 290.0, 290.0), (\n \"quantiles are incorrect\")",
"def ci_prop(p, n, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n # standard error\n std_error = np.sqrt(p * (1 - p) / n)\n # find the z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n # margin of error\n margin_of_error = np.round(z_star * std_error, 2)\n # calculate lower and upper confidence bounds\n lcb = np.round(p - margin_of_error, 2)\n ucb = np.round(p + margin_of_error, 2)\n\n print(\"Margin Of Error: {}\".format(margin_of_error))\n print(\n \"{}% Confidence Interval for Population Proportion: ({}, {})\".format(\n conf_level, lcb, ucb\n )\n )",
"def cdf(self,x):\n sortedMapping = sorted(self.mapping.items(), key=operator.itemgetter(0))\n if x == sortedMapping[-1][0]:\n return 1.0\n if x in self.values:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x == ( float(element[0]) if self.isFloat else element[0] ):\n return cumulative\n else:\n if self.isFloat:\n cumulative=0.0\n for element in sortedMapping:\n cumulative += element[1]\n if x >= element[0]:\n return cumulative\n # if we reach this point we must error out\n self.raiseAnError(IOError,'Categorical distribution cannot calculate cdf for ' + str(x))",
"def cdf(self,x):\n if self.functionType == 'cdf':\n cdfValue = self.cdfFunc(x)\n else:\n cdfValue = self.pdfFunc.integral(self.data[0][0],x)\n return cdfValue",
"def gpdfInt(t1,t2,c,tau):\n return st.gamma.cdf(t2,c,loc=0,scale=tau)-st.gamma.cdf(t1,c,loc=0,scale=tau)",
"def CumulativeDistribution(data, nbins, range=None, normed=True, centerbins=False):\n\n # 1) COMPUTE THE DISTRIBUTION OF THE DATA\n ydata, xdata = np.histogram(data, nbins, range, normed)\n\n # 1.1) Compute the cumulative sum of the probability\n ydata = ydata.cumsum()\n\n # 2) RETURN THE RESULTS\n if centerbins:\n dif = 0.5 * (xdata[-1] - xdata[0]) / nbins\n xdata += dif\n\n if normed:\n norm = 1.0 / ydata[-1]\n ydata *= norm\n\n return xdata[:-1], ydata\n\n else:\n return xdata[:-1], ydata"
] | [
"0.6312664",
"0.60228646",
"0.5892485",
"0.58310425",
"0.56982505",
"0.5682813",
"0.5677748",
"0.5657644",
"0.5619041",
"0.5616922",
"0.55988735",
"0.5576793",
"0.5562751",
"0.55557513",
"0.5543005",
"0.5532123",
"0.55102867",
"0.55094415",
"0.55092555",
"0.5476659",
"0.54524666",
"0.5439542",
"0.542626",
"0.5417793",
"0.5405346",
"0.5402678",
"0.5390032",
"0.5385955",
"0.53728783",
"0.5359358"
] | 0.7396832 | 0 |
Normalize the product of likelihood and prior. | def _normalize(self, inp):
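    # Rescale so the values sum to 1, giving a proper probability distribution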
return inp/inp.sum() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalize(X, mu, sigma):\n return (X - mu) / sigma",
"def _normalise(self):\n if not self.is_unit():\n n = self.norm\n if n > 0:\n self.q = self.q / n",
"def normalize(init_probs):\n total_prob = sum(init_probs)\n if total_prob > 0. + InferenceUtils._eps:\n for idx in range(len(init_probs)):\n init_probs[idx] = init_probs[idx] / total_prob\n\n # TODO: check refactor > do we have to return distrib with new instance?\n return init_probs",
"def normalize(x):\r\n return x/norm(x)",
"def normalize(self):\n total = 0.0\n for i in range(0,self.npoints):\n total+=self.y[i]*self._dx\n for i in range(0,self.npoints):\n self.y[i]/=total\n return",
"def normalize(self):\n norm_val = self.sum2/self.sum1\n self.sum1=0\n\n for sentence in self.data_set:\n sentence.weight *= norm_val\n self.sum1 += sentence.weight",
"def stdProbabilityNorm(self):\n return 1./factorial(self.alpha-1)",
"def normalize(self):\n total = self.total()\n for x in self.d:\n self.d[x] /= total\n return total",
"def model_normalize_(self, ref_point: 'ModelParameters', order=2):\n for parameter in self.parameters:\n parameter *= (ref_point.model_norm(order) / self.model_norm())",
"def normalize(self):\n self._data /= self.norm()",
"def normalize(self, factor):",
"def normalize(self):\n return (1. / abs(self)) * self",
"def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)",
"def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)",
"def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X",
"def normalize_l2(x):\n return x / (npla.norm(x))",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=self.norm_type, dim=1)\n self.normal_vectors.data = normalize(self.normal_vectors, p=2, dim=1)",
"def normalize_probability(p_unnormalized):\n p_normalized=p_unnormalized/p_unnormalized.sum(axis=0)\n return p_normalized",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.ent_proj_vects.data = normalize(self.ent_proj_vects.data, p=2, dim=1)\n self.rel_proj_vects.data = normalize(self.rel_proj_vects.data, p=2, dim=1)",
"def normalize(self, external=None) -> np.array:\n return self.y / np.max(self.y) if external is None else self.y / external",
"def normalize_to_prob(inp):\n return (inp + 1)/2",
"def normalize_parameters(self):\n self.entity_embeddings.weight.data = normalize(self.entity_embeddings.weight.data,\n p=2, dim=1)\n self.relation_embeddings.weight.data = normalize(self.relation_embeddings.weight.data,\n p=2, dim=1)\n self.projection_matrices.data = normalize(self.projection_matrices.data, p=2, dim=2)",
"def filter_normalize_(self, ref_point: 'ModelParameters', order=2):\n for l in range(len(self.parameters)):\n # normalize one-dimensional bias vectors\n if len(self.parameters[l].size()) == 1:\n self.parameters[l] *= (ref_point.parameters[l].norm(order) / self.parameters[l].norm(order))\n # normalize two-dimensional weight vectors\n for f in range(len(self.parameters[l])):\n self.parameters[l][f] *= ref_point.filter_norm((l, f), order) / (self.filter_norm((l, f), order))",
"def normalize(self):\n return Vector(self.args + []) / self.magnitude()",
"def stdProbabilityNorm(self):\n B = factorial(self.alpha-1)*factorial(self.beta-1)/factorial(self.alpha+self.beta-1)\n norm = 1.0/(2**(self.alpha+self.beta-1)*B)\n return norm",
"def normalize_weights(self):\n total_weight = sum(self.weights)\n self.norm_weights = self.weights / float(total_weight)",
"def normalized(self):\n return self / self.norm()",
"def normalize_weights(self):\n \n # Set negative weights to zero\n # Normalize to sum to one.\n \n\n\n self.new_weight=[]\n for i in self._weights:\n if any(i < 0 for i in self._weights):\n self.new_weight = [0,1]\n\n elif all(i == 0 for i in self._weights):\n i = 1/len(self._weights)\n self.new_weight.append(i)\n else:\n i = i/sum(self._weights)\n self.new_weight.append(i)\n\n # If the weights are all zeros, set weights equal to 1/k, where k is the number\n # of components.\n self._weights = self.new_weight\n self._weights = np.round(self._weights,3)",
"def normalise(x):\n return (x - jnp.min(x)) / (jnp.max(x) - jnp.min(x))",
"def _normalize(self, dataset):\n if self.max is None: # if we are normalizing the training set\n self.max, self.min = dataset.max(), dataset.min() # find max, min value for each columns\n for row in dataset.index: # for each row in dataset\n for col in self.features: # for each feature in the instance (exclude target)\n dataset.at[row, col] = (dataset.at[row, col] - self.min[col]) / (self.max[col] - self.min[col]) if col != \"Bias\" else 1"
] | [
"0.68239534",
"0.67643946",
"0.6680338",
"0.66775995",
"0.66477513",
"0.66317827",
"0.6610939",
"0.65681386",
"0.6563714",
"0.65164256",
"0.6468991",
"0.6431344",
"0.6422754",
"0.6422754",
"0.6386382",
"0.63844436",
"0.63793725",
"0.6371887",
"0.63595843",
"0.63073415",
"0.63022774",
"0.63011914",
"0.6296997",
"0.6289034",
"0.6277023",
"0.62377846",
"0.62096643",
"0.61980057",
"0.6197396",
"0.6193082"
] | 0.680125 | 1 |
Longest-run test cases with more than one target | def test_longest_run_mult(self):
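    # The third argument appears to be the number of non-matching characters
    # tolerated inside the run (inferred from the cases below)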
self.assertTrue(geneutil.longestRun('QQQQN','QN')==5)
self.assertTrue(geneutil.longestRun('QQANNQ','QN',1)==6)
self.assertTrue(geneutil.longestRun('QQNPPQ','QN',1)==3)
self.assertTrue(geneutil.longestRun('QQQAANN','QN',2)==7)
self.assertTrue(geneutil.longestRun('ANQNQAN','QN',1)==6)
self.assertTrue(geneutil.longestRun('ANQNQANP','QN',1)==6) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getResult(targets, i=None):",
"def test_which_targets():\n num_multi_targets = 0\n for which_targets_day in which_targets:\n # All inputs have a label\n assert np.all(which_targets_day.sum(axis=1) > 0)\n # No inputs have more than 3 targets\n assert np.all(which_targets_day.sum(axis=1) < 4)\n\n num_multi_targets += np.sum(which_targets_day.sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0",
"def longest_ORF_unit_tests():\n\n # YOUR IMPLEMENTATION HERE",
"def longest_ORF_unit_tests():\n\n # YOUR IMPLEMENTATION HERE",
"def run_automatic_tester():\n number_of_target_maps = len(os.listdir(TargetDetectionTesterSettings.TARGET_DETECTION_REPORT_JSON_FILE_SAVE_PATH))\n overall_true_positive_count = 0\n overall_false_positive_count = 0\n overall_target_count = 0\n\n for index_0 in range(number_of_target_maps):\n\n answer_sheet = json.load(open(os.path.join(TargetDetectionTesterSettings.TARGET_MAP_ANSWER_SHEET_PATH, str(index_0 + 1) + \".json\")))\n answer_list = []\n\n for index_1 in range(len(answer_sheet[\"targets\"])):\n answer_list.append((answer_sheet[\"targets\"][index_1][\"target_center_coordinates\"][0], answer_sheet[\"targets\"][index_1][\"target_center_coordinates\"][1]))\n overall_target_count += len(answer_list)\n\n target_detection_result = json.load(open(os.path.join(TargetDetectionTesterSettings.TARGET_DETECTION_REPORT_JSON_FILE_SAVE_PATH, str(index_0 + 1) + \".json\")))\n result_list = []\n\n for index_2 in range(len(target_detection_result[\"image_processing_results\"])):\n result_list.append((target_detection_result[\"image_processing_results\"][index_2][\"target_location\"][0] + (target_detection_result[\"image_processing_results\"][index_2][\"target_location\"][2] / 2), target_detection_result[\"image_processing_results\"][index_2][\"target_location\"][1] + (target_detection_result[\"image_processing_results\"][index_2][\"target_location\"][3] / 2)))\n\n current_true_positive_count = 0\n current_false_positive_count = 0\n banned_index_list = []\n\n for index_3 in range(len(answer_list)):\n true_positive_found = False\n\n for index_4 in range(len(result_list)):\n is_index_4_banned = False\n\n for index_5 in range(len(banned_index_list)):\n if (index_4 == banned_index_list[index_5]):\n is_index_4_banned = True\n\n if (is_index_4_banned == True):\n continue\n\n correct_target_center_x = answer_list[index_3][0]\n correct_target_center_y = answer_list[index_3][1]\n\n detected_target_center_x = result_list[index_4][0]\n detected_target_center_y = result_list[index_4][1]\n\n if ((abs(correct_target_center_x - detected_target_center_x) <= 20) and (abs(correct_target_center_y - detected_target_center_y) <= 20)):\n current_true_positive_count += 1\n banned_index_list.append(index_4)\n true_positive_found = True\n continue\n\n current_false_positive_count = len(result_list) - current_true_positive_count\n\n overall_true_positive_count += current_true_positive_count\n overall_false_positive_count += current_false_positive_count\n\n percentage = 100 * float(overall_true_positive_count) / (overall_target_count)\n\n TargetDetectionTesterLogger.log(\"--------------------------------------------------\")\n TargetDetectionTesterLogger.log(\"Total True Positive Count: \" + str(overall_true_positive_count))\n TargetDetectionTesterLogger.log(\"Total False Positive Count: \" + str(overall_false_positive_count))\n TargetDetectionTesterLogger.log(\"Percentage of Successfully Detected Targets: \" + str(percentage) + \"%\")\n TargetDetectionTesterLogger.log(\"--------------------------------------------------\")",
"def test_target_number_less_than_alp(self):\n alp = list(range(10))\n targets = generate_targets(alp, 5)\n self.assertEqual(len(targets), 5)\n self.assertEqual(len(targets), len(set(targets)))",
"def target_multi_objective1(\n config: Configuration,\n seed: int,\n # instance: str,\n # budget: float,\n) -> list[float]:\n return [seed, seed]",
"def run_mcts(self, runs_per_round):\n for i in range(runs_per_round):\n self.select(self.env, 'r')\n self.env_reset()\n counts = [self.Nsa[('r', a)] for a in range(self.actions)]\n # print(\"counts \", counts)\n # print(\"Q-values\", [self.Qsa[('r', a)] for a in range(self.actions)])\n # print()\n return np.argmax(counts)",
"def test_target_greater_than_alp(self):\n alp = list(range(5))\n targets = generate_targets(alp, 10)\n self.assertEqual(len(targets), 10)\n\n counts = Counter(targets)\n\n for item in alp:\n self.assertEqual(counts[item], 2)",
"def get_n_best(self):\n pass",
"def get_test_suite():\n # max for a and p\n MAX = 2**31 - 1 # INT32_MAX, max value for a and p\n sqrt_MAX = floor(sqrt(MAX)) # max for n\n \n # first test suite\n a_list = [0, 0, 0, 1, 1, 2, 7, 2, 1, 0, 0, 3, 1, 0, 0, 0, 1]\n p_list = [5, 3, 3, 0, 0, 0, 8, 1, 1, 0, 0, 0, 0, 1, 2, 0, 1]\n n_list = [7, 2, 2, 7, 3, 3, 0, 0, 0, 1, 2, 0, 0, 0, 0, 0, 1]\n\n suite = get_one_suite(a_list, p_list, n_list, MAX, sqrt_MAX)\n yield suite\n \n # second test suite\n a_list = [3, 5, 23, 25, 100, 200, MAX, MAX-1, MAX]\n p_list = [10, 5, 23, 25, 100, 200, 1000, 100, 500]\n n_list = [23, 1, 0, 7, 1, 100, sqrt_MAX, 3, 23]\n \n suite = get_one_suite(a_list, p_list, n_list, MAX, sqrt_MAX)\n yield suite\n\n # third test suite\n a_list = []\n p_list = []\n n_list = []\n\n # keep a = 0\n for _ in range(10):\n a_list.append(0)\n p_list.append(random.randint(0, 5000))\n n_list.append(random.randint(0, sqrt_MAX))\n # keep p = 0\n for _ in range(10):\n a_list.append(random.randint(0, MAX))\n p_list.append(0)\n n_list.append(random.randint(0, sqrt_MAX))\n # keep n = 0\n for _ in range(10):\n a_list.append(random.randint(0, MAX))\n p_list.append(random.randint(0, 5000))\n n_list.append(0)\n # keep a = 0 and p = 0\n for _ in range(10):\n a_list.append(0)\n p_list.append(0)\n n_list.append(random.randint(0, sqrt_MAX))\n # keep all non-zero\n for _ in range(30):\n a_list.append(random.randint(0, MAX))\n p_list.append(random.randint(0, 5000))\n n_list.append(random.randint(0, sqrt_MAX))\n\n suite = get_one_suite(a_list, p_list, n_list, MAX, sqrt_MAX)\n yield suite",
"def main(keep_best_count, mutation_factor, rounds, target, stagnate):\n ways = [range(len(DISTANCES))]\n result = {'round':0,'cost':None}\n for i in range(rounds):\n ways = mutate(ways,mutation_factor)\n best = []\n for way in ways:\n best.append((rate(way),way))\n best.sort()\n if VERBOSITY:\n for way in best:\n print way\n print \"Round %d best way is %s\" % (i+1, best[0][0])\n # break if we hit the target\n if best[0][0] <= target:\n print \"Hit Target\"\n break\n # break if we stagnate to long\n if result['cost'] is None or best[0][0] <result['cost']:\n result['cost'] = best[0][0]\n result['round'] = i+1\n elif result['round'] + stagnate <= i+1:\n print \"Stagnate to long\"\n break\n ways = list(b[1] for b in best[0:keep_best_count])\n print \"\"\n print \"best found order with cost=%d\" % best[0][0]\n print ' '.join(list(NAMES[i] for i in best[0][1]))\n print \"\"",
"def solutionByOthers(self, nums, target):\n nums.sort()\n results = []\n\n self._findNSum( nums, target, 4, [], results )\n return results",
"def count_target(self):\n tally = {}\n for obj in self.target:\n tally[obj] = 0\n\n ind = 0\n for label in self.labelList:\n filename = self.pathLabel + label\n f = open(filename, 'r')\n content = f.read().split('\\n')\n for line in content:\n items = line.split(' ')\n if items[0] in self.target:\n tally[items[0]] += 1\n f.close()\n if ind % 100 == 0:\n print(f'[COUNT] {ind} of {len(self.labelList)} processed')\n ind += 1\n \n print('[COUNT] done counting targets in dataset')\n print(tally)",
"def findmaxidx(datasets, target='atom_label'):\n\n if target == 'atom_label':\n return _findmaxidx(datasets, 0)\n elif target == 'wle_label':\n return _findmaxidx(datasets, 2)",
"def test_where_targets():\n num_multi_targets = 0\n for where_targets_day in where_targets:\n # All inputs have a label\n assert np.all(where_targets_day.sum(axis=3).sum(axis=3).sum(axis=1).sum(axis=1) > 0)\n num_multi_targets += np.sum((where_targets_day.sum(axis=3).sum(axis=3).sum(axis=2) > 1).sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0",
"def count_targets(searchList):\n targets = {}\n for x in searchList:\n loVal = -10000 - x\n hiVal = 10000 - x\n loInd = bisect_left(searchList, loVal)\n hiInd = bisect_right(searchList, hiVal)\n for y in searchList[loInd:hiInd]:\n if y == x:\n continue\n t = x + y\n targets[t] = 1\n return len(targets)",
"def gen_jobs(lower_idx, upper_idx, target=\"llvm\"):\n return [LorienTestWorkload(target, idx).to_job() for idx in range(lower_idx, upper_idx)]",
"def evaluate(self, test_data):\n test_results = [(np.argmax(self.feedforward(x)), np.argmax(y)) #argmax 返回最大数的索引\n for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)",
"def evaluate(self, test_data):\r\n test_results = [(np.argmax(self.feedforward(x)), y)\r\n for (x, y) in test_data]\r\n #print(self.feedforward(test_data[0][0]))\r\n #print(test_data[0][1])\r\n return sum(int(x == y) for (x, y) in test_results)",
"def evaluate1_6(self, test_data):\r\n test_results = [(np.argmax(self.feedforward1_6(x)), y)\r\n for (x, y) in test_data]\r\n #print(self.feedforward(test_data[0][0]))\r\n #print(test_data[0][1])\r\n return sum(int(x == y) for (x, y) in test_results)",
"def evaluate2_5_1(self, test_data):\r\n test_results = [(np.argmax(self.feedforward(x)), y)\r\n for (x, y) in test_data]\r\n #print(self.feedforward(test_data[0][0]))\r\n #print(test_data[0][1])\r\n return test_results",
"def greedy_search(self,inputs,states=None,max_len=20):\n \n ids_list = list()\n for i in range(max_len):\n hiddens,states = self.lstm(inputs,states)\n outputs = self.linear(hiddens.squeeze(1))\n #Get the most likely integer to represent the token\n \n predicted = outputs.argmax(1)\n ids_list.append(predicted.item())\n inputs = self.embed(predicted)\n inputs = inputs.unsqueeze(1)\n return ids_list",
"def main():\n ngon_sols = find_all_ngon_sols()\n ngon_strs = set(str(ngon) for ngon in ngon_sols)\n ngon_ints = (int(ngon_str) for ngon_str in ngon_strs\n if len(ngon_str) == TARGET_LEN) \n\n print(\"Max ngon integer is {}\".format(max(ngon_ints)))",
"def run_test_cases(self):\n count = 1\n for test_case in self.test_cases:\n print(\"Running test case #%d\" % count)\n if test_case.name == 'RouteDistance':\n distance = self.get_distance_for_route(test_case.args)\n print('%s distance: %s' % (test_case.args, distance))\n elif test_case.name == 'RouteShortest':\n args = test_case.args.split('|')\n shortest_distance = self.find_shortest_path_between_cities(args[0], args[1])\n print(\"Shortest distance between %s and %s: %d\" % (args[0], args[1], shortest_distance))\n elif test_case.name == 'RouteLessThanHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with hops less than or equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteEqualHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]), equal=True)\n print('Paths between %s and %s with hops equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteLessThanDistance':\n args = test_case.args.split('|')\n paths = self.trips_distance_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with distance less than %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n else:\n raise Exception('Unknown test case: %s' % test_case.name)\n count += 1\n print()",
"def count_targets(searchList):\n count = 0\n n = len(searchList)\n stop1 = time.time()\n for t in range(-10000, 10001):\n for x in searchList:\n if t - x <= x:\n break\n i = bisect_left(searchList, t - x, hi=n-1)\n if searchList[i] == t - x:\n count += 1\n break\n return count",
"def test_when_targets():\n num_multi_targets = 0\n for when_targets_day in when_targets:\n # All inputs have a label\n assert np.all(when_targets_day.sum(axis=1).sum(axis=1) > 0)\n\n num_multi_targets += np.sum((when_targets_day.sum(axis=2) > 1).sum(axis=1) > 1)\n\n # Some days have multi-targets\n assert num_multi_targets > 0",
"def run(self, target: int) -> list:\n\n valid_values = range(0, 99)\n\n # This will be some sort of brute force attempt\n values = [0, 0]\n while values[0] < 100 and values[1] < 100:\n self.reset()\n# import pdb; pdb.set_trace()\n test = self._process(values[0], values[1])\n if test == target:\n break\n else:\n if values[0] <= 99:\n if values[1] < 99:\n values[1] += 1\n elif values[1] == 99 and values[0] < 99:\n values[0] += 1\n values[1] = 0\n\n else:\n raise OpCodeError(\"No value possible\")\n\n return 100 * values[0] + values[1]",
"def compute_splits(feature_df, target_col, max_num_splits):\n tree_estimator = DecisionTreeClassifier(max_leaf_nodes=max_num_splits+1,\n class_weight='balanced',\n random_state=1407)\n\n tree_estimator.fit(feature_df, target_col)\n thresholds = tree_estimator.tree_.threshold[tree_estimator.tree_.children_left != _tree.TREE_LEAF]\n return sorted(thresholds)",
"def test_find_long_chains(self):\n # a --> d --> j g h --> i\n # b _/ c --> e --> f\n self._build_sample_graph()\n # Adding singleton\n sg = self.skill_graph.add(Skill.build('g', ''))\n # Adding short path\n sh = self.skill_graph.add(Skill.build('h', ''))\n si = self.skill_graph.add(Skill.build('i', ''))\n self.skill_graph.add_prerequisite(si.id, sh.id)\n # Making path longer\n sj = self.skill_graph.add(Skill.build('j', ''))\n self.skill_graph.add_prerequisite(sj.id, self.sd.id)\n skill_map = SkillMap.load(self.course)\n result = SkillMapMetrics(skill_map).long_chains(2)\n expected = [\n [self.sa.id, self.sd.id, sj.id],\n [self.sb.id, self.sd.id, sj.id],\n [self.sc.id, self.se.id, self.sf.id]\n ]\n self.assertEqual(sorted(expected), sorted(result))"
] | [
"0.5934326",
"0.5803281",
"0.57657754",
"0.57657754",
"0.57301104",
"0.56978285",
"0.55573726",
"0.5550279",
"0.55331385",
"0.5475259",
"0.546607",
"0.54098874",
"0.5349436",
"0.53450775",
"0.53050417",
"0.5281973",
"0.5245072",
"0.5236958",
"0.523272",
"0.51985705",
"0.51941895",
"0.51761985",
"0.5167142",
"0.5166413",
"0.51572186",
"0.51529944",
"0.514977",
"0.5145269",
"0.5138368",
"0.5130986"
] | 0.6047128 | 0 |
Max Sliding Count test cases | def test_max_sliding_count(self):
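    # The third argument appears to be the sliding-window width (inferred from
    # the cases below, e.g. ten 'M's counted within a window of 10)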
self.assertTrue(geneutil.maxSlidingCount('AAAAA','A')==5)
self.assertTrue(geneutil.maxSlidingCount('AAAAA','Q')==0)
self.assertTrue(geneutil.maxSlidingCount('AAATAA','A')==4)
self.assertTrue(geneutil.maxSlidingCount('AAATTAA','A')==3)
self.assertTrue(geneutil.maxSlidingCount('MMMMMMMMMMABCABCABCDM','M',10)==10)
self.assertTrue(geneutil.maxSlidingCount('MMMMMMMMMMABCABCABCDM','C',10)==3) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 8\n\n \"\"\"maxi, number_of_dice, ret = 0, 10, 0\n while number_of_dice > 0:\n avg = make_averaged(roll_dice)(number_of_dice, dice)\n maxi = max(maxi, avg)\n if avg >= maxi:\n ret = number_of_dice\n number_of_dice -= 1\n return ret\"\"\"\n\n\n\n counterA = 1\n num_rolls=1\n max_value = 0\n best_num_rolls = 0\n while counterA <= 10:\n num_rolls = counterA\n average_function = make_averaged(roll_dice)(counterA, dice)\n if average_function > max_value:\n max_value = average_function\n best_num_rolls = counterA\n counterA +=1\n return best_num_rolls\n\n \"\"\"counterA = 1\n maxvalue = 0\n maxvaluenumber = 0\n while(counterA<=10):\n num_rolls = counterA\n average_for_roll = make_averaged(roll_dice(num_rolls, dice), num_samples)\n counterB = average_for_roll(roll_dice(counterA, dice))\n if(counterB>maxvalue):\n maxvalue = counterB\n maxvaluenumber = counterA\n counterA +=1\n return maxvaluenumber\"\"\"\n # END PROBLEM 8",
"def get_highest(self, test):\n return",
"def N_states_for_learner(self):\n idx_max = []\n limits = 50, 2*_math.pi, 50, 50, 50, 50, 50, 50, 50\n for idx, limit in enumerate(limits):\n test = [0 for i in xrange(len(limits))]\n check = _arange(-limit,limit,limit/1000.)\n maxi = 0\n for v in check:\n test[idx]=v\n ret = self._state_index(*test)\n maxi = max((maxi, ret[idx]))\n idx_max.append(maxi)\n\n return tuple([idx+1 for idx in idx_max])",
"def count_max(alon):\n return count_max_acc(alon, alon[0], 0, 0)",
"def max_scoring_num_rolls(dice=six_sided, num_samples=1000):\n # BEGIN PROBLEM 9\n \"*** YOUR CODE HERE ***\"\n k, max_value, max_num = 1, 0, 0\n roll = make_averaged(roll_dice, num_samples)\n while k <= 10:\n current_value = roll(k, dice)\n #print('k: ' + str(k) + ' current_value: ' + str(current_value))\n if current_value > max_value:\n max_value, max_num = current_value, k\n k += 1\n return max_num\n # END PROBLEM 9",
"def test_max_begin(self):\n self.assertEqual(max_integer([5, 3, 4, 1]), 5)",
"def max(self):\r\n\t\treturn max(self.sample)",
"def predict(self, test_data):\n count = 0.0\n for testcase in test_data:\n answer = np.argmax(testcase[1])\n prediction = np.argmax(self.feed_forward(testcase[0]))\n count = count + 1 if (answer - prediction) == 0 else count\n return count",
"def count_max_acc(alon, curr_max, count, pos):\n if pos == len(alon):\n return count\n curr_num = alon[pos]\n if curr_num > curr_max:\n curr_max = curr_num\n count = 0\n if curr_num == curr_max:\n count += 1\n return count_max_acc(alon, curr_max, count, pos+1)",
"def test_max_end(self):\n self.assertEqual(max_integer([5, 3, 4, 8]), 8)",
"def evaluate(self, test_data):\n test_results = [(np.argmax(self.feedforward(x)), np.argmax(y)) #argmax 返回最大数的索引\n for (x, y) in test_data]\n return sum(int(x == y) for (x, y) in test_results)",
"def _get_max_estimated_bandit(self)->Bandit:\n # print(\"mus - \", self.mu)\n # print(\"actions - \", np.argmax(self.mu))\n unique, counts = np.unique(self.mu, return_counts=True)\n lens = counts[np.argmax(unique)] \n if lens>1: # if two actions have same argmax\n # then return arbitrarily from those max ones\n maxs = list(np.array(self.bandits)[self.mu==unique[np.argmax(unique)]])\n return np.random.choice(maxs)\n # otherwise return the max one\n return self.bandits[np.argmax(self.mu)]",
"def max_counts(self):\n\n return np.nanmax(self.pre_proc_data)",
"def get_support_max_limit(item, counts):\n return int(max(counts[item] / MIN_ALL_CONF, MIN_SUPPORT))",
"def get_max_run(run):\n max = 0\n max_i = 0\n for i in range(800, 900):\n if int(run[i]) > int(max):\n max = run[i]\n max_i = i\n return max, max_i",
"def MaxTestStat(self):\n return max(self.test_stats)",
"def search_for_maximum(self):\n return self.maximise_aquisition(self.expected_improvement)",
"def test_max_mid(self):\n self.assertEqual(max_integer([5, 3, 8, 4, 1]), 8)",
"def max_value(gameState):\n if terminal_test(gameState): return -1",
"def test_max_two_sources(self):\n metric = self.metric(addition=\"max\")\n measurement = self.measurement(\n metric,\n sources=[self.source(metric, value=\"10\"), self.source(metric, value=\"20\")],\n )\n self.assertEqual(\"20\", measurement[\"count\"][\"value\"])",
"def test_max(doctest):",
"def test_max_events_range(self):\n\n self.log.info(\"Testing max_event counts\")\n enable_failover = True\n timeout_val = 10\n max_plus_1 = CbServer.Failover.MAX_EVENTS + 1\n\n # Set max_events between (min, max)\n for num_events in range(CbServer.Failover.MIN_EVENTS, max_plus_1):\n status = self.rest.update_autofailover_settings(\n enable_failover, timeout_val, maxCount=num_events)\n self.assertTrue(status, \"Failed to set max events=%s\" % num_events)\n self.validate_failover_settings(enable_failover, timeout_val,\n 0, num_events)\n\n for num_events in [0, max_plus_1]:\n self.log.info(\"Testing max_event_count=%s\" % num_events)\n status = self.rest.update_autofailover_settings(\n enable_failover, timeout_val, maxCount=max_plus_1)\n self.assertFalse(status, \"Able to set max events=%s\" % num_events)\n self.validate_failover_settings(enable_failover, timeout_val,\n 0, CbServer.Failover.MAX_EVENTS)",
"def max_score_test(self):\n max_score_tuple = self.results.max_score(molecules=[\"DDSPDLPK\"])\n assert max_score_tuple[0] == 1 # score\n assert max_score_tuple[3].scaling_factor == 100 # intensity\n\n assert self.results.max_score(molecules=[\"_DDSPDLPK_\"]) == [0, None, None, None]\n return",
"def get_max_combination(total_cuts):\n max_pieces = 0\n for i in range(total_cuts):\n result = i * (total_cuts - i)\n if result > max_pieces:\n max_pieces = result\n print(max_pieces)",
"def findMaximal(freqSet):",
"def test_maxIndex(self):\t\t\n self.assertEqual(attempt.maxIndexZ, 113)\n self.assertEqual(attempt.maxIndexW, 134)",
"def max(self, i):\n x=self.val(i,0)\n lm=len(self)\n t=1\n while t<lm:\n y=self.val(i,t)\n if x<y:\n x=y\n t+=1\n return x",
"def test_max_samples(self):\n assert setup.setup_component(\n self.opp,\n \"binary_sensor\",\n {\n \"binary_sensor\": {\n \"platform\": \"trend\",\n \"sensors\": {\n \"test_trend_sensor\": {\n \"entity_id\": \"sensor.test_state\",\n \"max_samples\": 3,\n \"min_gradient\": -1,\n }\n },\n }\n },\n )\n self.opp.block_till_done()\n\n for val in [0, 1, 2, 3, 2, 1]:\n self.opp.states.set(\"sensor.test_state\", val)\n self.opp.block_till_done()\n\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert state.state == \"on\"\n assert state.attributes[\"sample_count\"] == 3",
"def find_max_with_count(A):\n\n def frmax(lo, hi):\n \"\"\"Use recursion to find maximum value in A[lo:hi+1] incl. count\"\"\"\n if lo == hi: return (0, A[lo])\n\n mid = (lo+hi)//2\n ctleft,left = frmax(lo, mid)\n ctright,right = frmax(mid+1, hi)\n return (1+ctleft+ctright, max(left, right))\n\n return frmax(0, len(A)-1)",
"def max(self):\n maxs = self.client.map(_call_max, self.vecDask, pure=False)\n max_val = - np.inf\n for future, result in daskD.as_completed(maxs, with_results=True):\n if result > max_val:\n max_val = result\n return max_val"
] | [
"0.62596506",
"0.6250178",
"0.5989606",
"0.59886724",
"0.59800094",
"0.5964477",
"0.5924157",
"0.59153247",
"0.587332",
"0.58163136",
"0.58109987",
"0.5752196",
"0.57395124",
"0.5706765",
"0.5683033",
"0.5675851",
"0.5669799",
"0.5661942",
"0.56422496",
"0.56366104",
"0.56272537",
"0.55978036",
"0.55771285",
"0.5576232",
"0.5575976",
"0.55610085",
"0.5495193",
"0.54949117",
"0.5494849",
"0.54705244"
] | 0.7155858 | 0 |
Determine what moves are safe for a player to make. Returns a list of valid actions that player p can make in the given state. | def safe_moves(p, state):
x, y = state['players'][p]['x'], state['players'][p]['y']
moves = []
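    # Candidate actions as (dx, dy, name); y increases to the south on this grid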
actions = [(1, 0, 'east'),
(-1, 0, 'west'),
(0, -1, 'north'),
(0, 1, 'south')]
for dx, dy, move in actions:
tx, ty = str(x + dx), str(y + dy)
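        # Coordinates are keyed as strings; a cell absent from state['cells']
        # is unclaimed, so moving there is safe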
if tx not in state['cells'] or ty not in state['cells'][tx]:
moves.append(move)
return moves | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def actions(self, state):\r\n\r\n valid_actions = []\r\n # What kind of an action it will be\r\n # 1. Add a new piece to the game.\r\n # 2. Move and existing piece.\r\n new_piece, player = self.new_or_old_piece(state)\r\n\r\n # If we want to place a new piece in the game\r\n if new_piece:\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] == '-':\r\n # (player, to, from)\r\n # Since we are introducing a new piece it's coming from\r\n # an imaginary position i.e. (9, 9)\r\n valid_actions.append((player, (i, j), (9, 9)))\r\n\r\n # when we moving an existing piece in the game\r\n else:\r\n for i in range(3):\r\n for j in range(3):\r\n if state[i][j] != '-':\r\n # Now check for places this player can move from this position\r\n for ii, jj in self.valid_adjacent_positions[(i, j)]:\r\n if state[ii][jj] == '-':\r\n # (player, to, from)\r\n valid_actions.append((state[i][j], (ii, jj), (i, j)))\r\n\r\n return copy.deepcopy(valid_actions)",
"def get_safe_actions(state):\n safe = set()\n if state.player_has_armor(state.ptm):\n\n for action in {U, D, L, R}:\n r1, c1 = TronProblem.move(state.player_locs[state.ptm], action)\n if not (\n state.board[r1][c1] == CellType.WALL\n or TronProblem.is_cell_player(state.board, (r1, c1))\n ):\n safe.add(action)\n return safe\n else:\n for action in {U, D, L, R}:\n r1, c1 = TronProblem.move(state.player_locs[state.ptm], action)\n if not (\n state.board[r1][c1] == CellType.BARRIER\n or state.board[r1][c1] == CellType.WALL\n or TronProblem.is_cell_player(state.board, (r1, c1))\n ):\n safe.add(action)\n return safe",
"def actions(self, state):\n MovementList = []\n #Check if the agent is able to move a box (Left, Down, Right, Up) \n #without moving it into a taboo cell or pushing two blocks (Invalid move)\n #then move the box in the given direction.\n \n possible_moves = [\"Up\", \"Down\", \"Left\", \"Right\"]\n \n worker = state[0]\n boxes = state[1]\n \n # Iterate throguh the moves and make sure they satify constraints\n for move in possible_moves:\n if (move_coords(worker, move) not in self.walls):\n if (move_coords(worker, move) in boxes):\n if move_coords(move_coords(worker, move), move) in self.taboo:\n pass\n else: \n MovementList.append(move)\n else:\n MovementList.append(move)\n \n return MovementList",
"def legal_moves():\n\tlegal_moves = (\"r\", \"p\", \"s\")\n\treturn legal_moves",
"def getLegalActions(self, state):\n actions = [i for i in range(-5, 6)]\n for action in actions:\n if action > state[0] or action < -state[1]:\n actions.remove(action)\n return actions",
"def getPossibleActions(self, state):\n if self.weHaveBall(state):\n return [('hold',)] + [('pass', i) for i in range(1, self.keeperNum)]\n else:\n return [None]",
"def findActions(problem, state):\r\n size = len(problem) - 1\r\n legalActions = []\r\n if state[0] > 0 and problem[state[0] - 1][state[1]] != 'w':\r\n legalActions.append('N')\r\n if state[0] < size and problem[state[0] + 1][state[1]] != 'w':\r\n legalActions.append('S')\r\n if state[1] > 0 and problem[state[0]][state[1] - 1] != 'w':\r\n legalActions.append('W')\r\n if state[1] < size and problem[state[0]][state[1] + 1] != 'w':\r\n legalActions.append('E')\r\n return legalActions",
"def get_possible_actions(self, state):\n return [LEFT, DOWN, RIGHT, UP]",
"def getLegalActions( state ): ## This is being called by the GameState.getLegalActions function and uses self as the state argument.\n return Actions.getPossibleActions( state.getPacmanState().configuration, state.data.layout.walls )## REF-211 calls the getPossibleActions method in the Actions class.",
"def get_all_possible_moves(self, state):\n move_list = []\n done_finding_moves = False\n any_non_pass_moves = False\n while not done_finding_moves:\n try:\n m = next(self.move_generator) # Gets a (move, state) pair.\n # print(\"next returns: \",m[0]) # Prints out the move. For debugging.\n if m[0] != 'p':\n any_non_pass_moves = True\n move_list.append(m) # Add the move to the list.\n except StopIteration as e:\n done_finding_moves = True\n if not any_non_pass_moves:\n move_list.append(('p',state))\n return move_list",
"def actions(state):\n action_list = []\n\n if state.active_color == cc.WHITE_ACTIVE:\n active_pieces = cc.WHITE_PIECES\n elif state.active_color == cc.BLACK_ACTIVE:\n active_pieces = cc.BLACK_PIECES\n else:\n raise Exception(\"Actions: Invalid Active Color\")\n # Check for states where castling can occur\n castles = gm.get_castle(state)\n if castles[0]: # Kingside Castle\n action_list.append(cc.Action(piece=cc.W_KING, castle=cc.CASTLE_KINGSIDE))\n if castles[1]: # Queenside Castle\n action_list.append(cc.Action(piece=cc.W_KING, castle=cc.CASTLE_QUEENSIDE))\n\n # Loop over the board, finding the moves for each piece\n for rank in range(8):\n for column in range(8):\n if state.board[rank, column] in active_pieces:\n p = gm.Piece(state.board[rank, column], (rank, column))\n action_list.extend(p.get_moves(state))\n\n # Handle En passant attacks\n for action in action_list:\n if action.end == state.en_passant:\n action.capture = True\n\n return action_list",
"def actions(self, state):\n\n actions = []\n \n # if its player 1's turn\n if state.maxs_turn==True:\n # look through all the squares on the board\n for coords in state.gameState:\n # if its a rebel append allowable move and attack actions\n if state.gameState[coords]=='R':\n if state.gameState[(coords[0]-1, coords[1])]== ' ':\n actions.append(\"Move: Rebel @ {} --> {}\".format(coords, (coords[0]-1, coords[1])))\n if ((coords[0]-1, coords[1]+1) in state.gameState) and (state.gameState[(coords[0]-1, coords[1]+1)]== 'S'):\n actions.append(\"Attack: Rebel @ {} --> Sith @ {}\".format(coords, (coords[0]-1, coords[1]+1)))\n if ((coords[0]-1, coords[1]-1) in state.gameState) and (state.gameState[(coords[0]-1, coords[1]-1)]== 'S'):\n actions.append(\"Attack: Rebel @ {} --> Sith @ {}\".format(coords, (coords[0]-1, coords[1]-1)))\n \n # if its a jedi append allowable move and attack actions\n elif state.gameState[coords]=='J':\n for direction in [(-1, 0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]:\n coord = (coords[0]+direction[0], coords[1]+direction[1])\n # walk in each direction until reaching the edge of board, or a player\n while (coord in state.gameState) and (state.gameState[coord] == ' '):\n actions.append(\"Move: Jedi @ {} --> {}\".format(coords, coord))\n coord = (coord[0]+direction[0], coord[1]+direction[1])\n # if we ran into a sith we can attack\n if (coord in state.gameState) and (state.gameState[coord] == 'S'):\n actions.append(\"Attack: Jedi @ {} --> Sith @ {}\".format(coords, coord))\n \n else:\n for coords in state.gameState:\n if state.gameState[coords]=='S':\n for direction in [(-1, 0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]:\n coord = (coords[0]+direction[0], coords[1]+direction[1])\n if (coord in state.gameState) and (state.gameState[coord] == ' '):\n actions.append(\"Move: Sith @ {} --> {}\".format(coords, coord))\n elif (coord in state.gameState) and (state.gameState[coord] == 'R'):\n actions.append(\"Attack: Sith @ {} --> Rebel @ {}\".format(coords, coord))\n elif (coord in state.gameState) and (state.gameState[coord] == 'J'):\n actions.append(\"Attack: Sith @ {} --> Jedi @ {}\".format(coords, coord))\n \n\n\n if len(actions)==0:\n actions.append(\"Pass\")\n \n actions.sort()\n \n return actions",
"def getLegalActions(self, state):\n return self.actionFn(state)",
"def get_pawn_moves(self, state):\n pawn_moves = []\n\n if self.color == cc.WHITE_ACTIVE:\n forward_1 = add_vectors(self.coord, cc.V_UP)\n forward_2 = add_vectors(self.coord, cc.V_UP_2)\n attacks = get_crawler_moves(self.coord, cc.W_PAWN_CAPTURE_VECTORS)\n starting_rank = cc.RANK_2\n promo_rank = cc.RANK_8\n promo_pieces = cc.WHITE_PROMO\n enemy_set = cc.BLACK_PIECES\n elif self.color == cc.BLACK_ACTIVE:\n forward_1 = add_vectors(self.coord, cc.V_DOWN)\n forward_2 = add_vectors(self.coord, cc.V_DOWN_2)\n attacks = get_crawler_moves(self.coord, cc.B_PAWN_CAPTURE_VECTORS)\n starting_rank = cc.RANK_7\n promo_rank = cc.RANK_1\n promo_pieces = cc.BLACK_PROMO\n enemy_set = cc.WHITE_PIECES\n else:\n raise Exception(\"get_pawn_moves: Invalid Piece Color\")\n\n if validate_move(forward_1) and state.board[forward_1] == cc.NO_PIECE:\n if forward_1[0] == promo_rank:\n for p in promo_pieces:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_1, promo=p))\n else:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_1))\n if self.coord[0] == starting_rank and validate_move(forward_2) and state.board[forward_2] == cc.NO_PIECE:\n pawn_moves.append(cc.Action(self.string, self.coord, forward_2, en_p=forward_1))\n\n for attack in attacks:\n if state.board[attack] in enemy_set:\n if attack[0] == promo_rank:\n for p in promo_pieces:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True, promo=p))\n else:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True))\n # Make sure Pawns can attack en_passant squares\n elif attack == state.en_passant:\n pawn_moves.append(cc.Action(self.string, self.coord, attack, capture=True))\n\n return pawn_moves",
"def possible(state_board,turn):\n\tlegal_moves = [] # list of legal moves as Move objects\n\tfor i in range(1,9):\n\t\tfor j in range(1,9):\n\t\t\tif state_board[i][j] == 0:\n\t\t\t\tif flipper([i,j],turn,state_board) != []:\n\t\t\t\t\t# if there are flipped pieces, it appends this move to\n\t\t\t\t\t# the legal moves and draws it in light greens\n\t\t\t\t\tlegal_moves.append((i,j))\n\t\t\t\t\tdrawPiece((i,j),3)\n\t\t\t\telse:\n\t\t\t\t\t# if it is 0 and is not legal, make sure it is of bgcolor\n\t\t\t\t\tdrawPiece((i,j),0)\n\t\n\treturn legal_moves",
"def actions(self, state):\n\n possible_actions = ['UP', 'DOWN', 'LEFT', 'RIGHT']\n index_blank_square = self.find_blank_square(state)\n\n # implement actions here \n\n return possible_actions",
"def getLegalActions(self,state):\n return self.actionFn(state)",
"def get_legal_moves(self):\n # for each square in the castle figure out if an moves can occur from it.\n moves = []\n allowed = [self.turn]\n if self.turn == DEFENDER:\n allowed.extend((KING, CASTLE_OCCUPIED))\n it = np.nditer(self.board_state, flags=['multi_index'])\n while not it.finished:\n index = it.multi_index\n curr_loc = it[0]\n if curr_loc in allowed:\n moves.extend(self.get_legal_move_piece(curr_loc, index))\n it.iternext()\n return moves",
"def actions(self, state, enemy=False):\n vaccinate_actions = []\n quarantine_actions = []\n medics = 1\n police = 2\n if not enemy:\n for (i, j) in self.zoc:\n if state[(i, j)] == 'H':\n vaccinate_actions.append(('vaccinate', (i, j)))\n if (state[(i, j)] == 'S1' or state[(i, j)] == 'S2') and isDanger(state, i, j, self.zoc):\n quarantine_actions.append(('quarantine', (i, j)))\n else:\n for (i, j) in self.zoc_enemy:\n if state[(i, j)] == 'H':\n vaccinate_actions.append(('vaccinate', (i, j)))\n if (state[(i, j)] == 'S1' or state[(i, j)] == 'S2') and isDanger(state, i, j, self.zoc_enemy):\n quarantine_actions.append(('quarantine', (i, j)))\n\n vaccinate_actions_pre = powerset1(vaccinate_actions, medics)\n quarantine_actions_pre = powerset1(quarantine_actions, police)\n vaccinate_actions_tup = tuple(vaccinate_actions_pre)\n quarantine_actions_tup = tuple(quarantine_actions_pre)\n\n if ((len(vaccinate_actions_tup) == 0) and (len(quarantine_actions_tup) != 0)):\n possible_actions = quarantine_actions_tup\n elif ((len(quarantine_actions_tup) == 0) and (len(vaccinate_actions_tup) != 0)):\n possible_actions = vaccinate_actions_tup\n elif ((len(quarantine_actions_tup) == 0) and (len(vaccinate_actions_tup) == 0)):\n possible_actions = [()]\n else:\n possible_actions = tuple()\n for action_p in quarantine_actions_tup:\n for action_m in vaccinate_actions_tup:\n action_m += action_p\n possible_actions += (action_m, action_p)\n possible_actions += vaccinate_actions_tup + quarantine_actions_tup\n return tuple(possible_actions)",
"def get_possible_actions(self, state):\n return tuple(self._transition_probs.get(state, {}).keys())",
"def actions(self, state):\n MovementList = []\n #Check if the agent is able to move a box (Left, Down, Right, Up) \n #without moving it into a taboo cell or pushing two blocks (Invalid move)\n #then move the box in the given direction.\n \n \n moves = [\"Up\", \"Down\", \"Left\", \"Right\"]\n opposite_moves = [\"Down\", \"Up\", \"Right\", \"Left\"]\n worker = state[0]\n boxes = state[1]\n temp_warehouse = self.warehouse.copy(worker, boxes)\n no_go = self.taboo.copy()\n walls = self.walls.copy()\n for wall in walls:\n no_go.append(wall)\n \n accessible = []\n \n for box in boxes:\n for i in range(len(moves)):\n surrounding_space = move_coords(box, opposite_moves[i])\n if can_go_there(temp_warehouse, move_coords(box, opposite_moves[i])):\n accessible.append((surrounding_space, moves[i]))\n \n for space_move in accessible:\n space = space_move[0]\n move = space_move[1]\n box_push_space = move_coords(move_coords(space, move), move)\n if (box_push_space in no_go) or (box_push_space in boxes):\n continue\n else:\n MovementList.append((move_coords(space, move), move)) \n print(\"Movement List: \", MovementList)\n \n if len(accessible) < 0: \n # Iterate throguh the moves and make sure they satify constraints\n for move in moves:\n if (move_coords(worker, move) not in no_go):\n if (move_coords(worker, move) in boxes):\n if move_coords(move_coords(worker, move), move) not in boxes:\n MovementList.append((move_coords(worker, move), move)) \n else:\n MovementList.append((move_coords(worker, move), move))\n \n \n \n \n \n return MovementList",
"def _available_actions(self, state, colour):\n available_actions = []\n if colour == \"white\":\n stacks = +state.state\n else:\n stacks = -state.state\n for square in stacks.keys():\n available_actions.append((\"BOOM\", square))\n for square, n in stacks.items():\n for d in range(1, n + 1):\n for next_square in self._NEXT_SQUARES(square, d):\n if next_square in stacks or state.state[next_square] == 0:\n for m in range(1, n + 1):\n move_action = (\"MOVE\", m, square, next_square)\n available_actions.append(move_action)\n return available_actions",
"def get_legal_actions(self, index):\n actions = []\n agent = self.agent_states[index]\n for action in ACTIONS:\n pos = agent.pos[0] + action[0], agent.pos[1] + action[1]\n if MAP[pos[0]][pos[1]] not in WALL:\n actions.append(action)\n return actions",
"def step(self, state):\n mcts_action = self.mcts_search(state)\n policy = [(action, (1.0 if action == mcts_action else 0.0))\n for action in state.legal_actions(self.player_id())]\n\n return policy, mcts_action",
"def legal_moves(player, board):\n return [sq for sq in Othello.squares() if Othello.is_legal(sq, player, board)]",
"def possible_moves(state_int):\n assert isinstance(state_int, int)\n field = decode_binary(state_int)\n return [idx for idx, col in enumerate(field) if len(col) < GAME_ROWS]",
"def get_legal_moves(self, player):\r\n move_list = []\r\n if self._phase == GamePhase.SETUP:\r\n return self._setup_legal_moves(player)\r\n elif self._phase == GamePhase.MOVE:\r\n return self._move_legal_moves(player)\r\n elif self._phase == GamePhase.BUILD:\r\n return self._build_legal_moves(player)\r\n return move_list",
"def getLegalMovingActions(state,agent):\n actions = state.getLegalActions(agent)\n # Removing 'Stop'\n if Directions.STOP in actions:\n actions.remove(Directions.STOP)\n return actions",
"def actions(self, player):\n snake = self.snakes.get(player)\n head = snake.position[0]\n return [m for m in MOVES\n if utils.isOnGrid(m.apply(head), self.grid_size)\n and snake.authorizedMove(m)]",
"def move(self, state_prev, state, reward):\n actions = []\n if state:\n self.select_player(state['ball'])\n else:\n self.selected = NUM_TEAM//2\n for i, player in enumerate(self.players):\n move = player.move(state_prev, state, reward, self.selected)\n if move != 'FORM':\n actions.append(move)\n else:\n actions.append(self.formation_dir(i))\n return actions"
] | [
"0.7829784",
"0.7794976",
"0.72625947",
"0.7095838",
"0.7046285",
"0.70153415",
"0.70093673",
"0.69802105",
"0.6962673",
"0.6890715",
"0.6863222",
"0.6827296",
"0.66367495",
"0.66215557",
"0.66136307",
"0.66089207",
"0.65999246",
"0.6596026",
"0.65576553",
"0.6540269",
"0.6538662",
"0.65383077",
"0.64809287",
"0.64495707",
"0.64364856",
"0.638771",
"0.63836336",
"0.6359694",
"0.6353979",
"0.63442975"
] | 0.79888695 | 0 |
Start the client listening to the game. Pass in a function that accepts the available actions and the current state of the game, and returns the action to take. The SDK will handle the rest. Checks whether any command-line arguments were passed at startup; if so, they are assumed to be client keys and are sent to the server when connecting. | def start(turn_handler):
if os.environ.get('BOTBOX_SECRET'):
print('Using env secret:', os.environ['BOTBOX_SECRET'])
headers = {'Authorization': os.environ['BOTBOX_SECRET']}
elif len(sys.argv) > 1:
print('Using cli secret:', sys.argv[1])
headers = {'Authorization': sys.argv[1]}
else:
print('Using no authentication')
headers = []
# get the URL for the server from an environment variable if it is set,
# otherwise use the default localhost
if os.environ.get('BOTBOX_SERVER'):
url = (WS_SERVER_SCHEME + '://'
+ os.environ['BOTBOX_SERVER'] + ':' + WS_SERVER_PORT)
else:
url = WS_SERVER_SCHEME + '://' + WS_SERVER_URL + ':' + WS_SERVER_PORT
print("Connecting to:", url)
ws = websocket.WebSocketApp(
url,
on_open = _on_open,
on_message = lambda ws, msg: _on_message(ws, msg, turn_handler),
on_error = _on_error,
on_close = _on_close,
header = headers
)
ws.run_forever() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start(self):\n if self._callable:\n self._is_running = True\n self._run_client()",
"async def run():\n # Get the arguments from the parser\n args = client.arguments\n\n # If the help argument was used, return\n if hasattr(args, \"help\"):\n return\n # Otherwise, check the correct command and invoke the respective function\n # BUILD\n if args.command == \"build\":\n if args.action == \"delete\":\n await client.delete_build(args.build)\n elif args.action == \"download\":\n await client.download_build(args.build, args.force)\n elif args.action == \"info\":\n await client.show_build(args.build)\n # BUILDS\n elif args.command == \"builds\":\n if args.refresh:\n await client.update_builds()\n await client.show_builds(args.ready_only)\n # FOLDER\n elif args.command == \"folder\":\n if args.action == \"create\":\n await client.create_folder(args.folder, args.no_resources)\n elif args.action == \"info\":\n await client.get_folder(args.folder)\n elif args.action == \"resources\":\n await client.get_resources(args.folder)\n elif args.action == \"delete\":\n await client.delete_folder(args.folder)\n # FOLDERS\n elif args.command == \"folders\":\n if args.refresh:\n await client.post(\"/folders\")\n await client.show_folders()\n # SERVER\n elif args.command == \"server\":\n if args.action == \"start\":\n await client.start_server(args.server, args.build)\n elif args.action == \"info\":\n await client.get_server(args.server)\n elif args.action == \"stop\":\n await client.stop_server(args.server)\n # SERVERS\n elif args.command == \"servers\":\n await client.print_servers()\n # INFO\n else:\n await client.show_info()",
"def run_action(client: Client, args: Namespace):\n\n result = None\n\n if args.action == 'exec':\n result = client.run(args.command, *args.argument)\n elif args.action == 'say':\n result = client.say(args.message)\n elif args.action == 'fortune':\n result = client.fortune(\n short=not args.long, offensive=args.offensive)\n elif args.action == 'datetime':\n result = client.datetime(frmt=args.format)\n elif args.action == 'in-use':\n players = client.players\n\n if players.online:\n LOGGER.info('There are %i players online:', players.online)\n LOGGER.info(', '.join(players.names))\n else:\n LOGGER.warning('There are no players online.')\n exit(1)\n\n if result:\n LOGGER.info(result)",
"def run_chat_client():\r\n while must_run:\r\n print_menu()\r\n action = select_user_action()\r\n perform_user_action(action)\r\n print(\"Thanks for watching. Like and subscribe! 👍\")",
"def main():\n\n args = get_args()\n log_level = DEBUG if args.debug else INFO\n basicConfig(level=log_level, format=LOG_FORMAT)\n host, port, passwd = get_credentials(args.server)\n\n try:\n with Client(host, port, timeout=args.timeout) as client:\n if not client.login(passwd):\n LOGGER.error('Failed to log in.')\n exit(4)\n\n if args.action == 'idle-shutdown':\n players = client.players\n else:\n run_action(client, args)\n except timeout:\n LOGGER.error('Connection timeout.')\n exit(3)\n\n if args.action == 'idle-shutdown':\n if not idle_shutdown(players, args):\n exit(1)",
"def start(self, autologin=True, autoreconnect=False):\n self.autologin = autologin\n self.autoreconnect = autoreconnect\n if self.loop.is_running():\n self.add_task(self._handler())\n logger.info(\n \"The client's event loop was already running. \"\n \"The client will run as a new task on the loop.\"\n )\n return True\n else:\n self.loop.run_until_complete(self._handler())\n return False",
"def start():\n if not cfg.irc:\n logging.warning(\"Skipping IRC module: no configuration provided\")\n return\n\n server = cfg.irc.server\n port = cfg.irc.port\n ssl = cfg.irc.ssl\n nick = cfg.irc.nick\n channels = cfg.irc.channels\n\n logging.info(\n \"Starting IRC client: server=%r port=%d ssl=%s nick=%r \" \"channels=%r\",\n server,\n port,\n ssl,\n nick,\n channels,\n )\n\n bot = Bot(cfg.irc)\n utils.DaemonThread(target=bot.start).start()\n\n evt_target = EventTarget(bot)\n events.dispatcher.register_target(evt_target)\n utils.DaemonThread(target=evt_target.run).start()",
"def start( *args, **kwargs ):",
"def start():\n server = current_server()\n logger.info('Starting Flexx event loop.')\n server.start()",
"def main():\n door = TalkingDoor()\n\n application = ApplicationBuilder().token(TOKEN).build()\n\n application.add_handlers(\n [\n CommandHandler([\"start\", \"help\"], door.help),\n CommandHandler(\"status\", door.status),\n CommandHandler(\"alarm\", door.alarm),\n CommandHandler(\"last_vid\", door.last_vid),\n CommandHandler(\"last_vids\", door.last_vids),\n CommandHandler(\"stop\", door.stop),\n CommandHandler(\"last\", door.last),\n CommandHandler(\"lines\", door.last_lines),\n ]\n )\n application.add_handler(CallbackQueryHandler(door.button))\n\n application.post_init = send_keyboard\n\n application.run_polling()",
"def main():\n s = start_server()\n accept_connection(s)",
"def game_start(self):\r\n\t\tself._comm_server.broadcast_message(\"game-start\")\r\n\t\tself._is_game_started = True\r\n\t\tself._handlers[\"game-start\"].invoke()\r\n\t\t_logger.info(\"Game is started.\")",
"def main() -> None:\n\n logger.info(f\"Arguments: {args}\")\n client = iotcore.Client()\n client.subscribe(args.request_topic, iotcore.QOS.AT_MOST_ONCE, handler)\n\n while True:\n # Keep app open and running\n time.sleep(1)",
"def main():\n usage = \"usage: %prog [options] channels\"\n parser = OptionParser(usage=usage)\n\n (options, args) = parser.parse_args()\n\n if len(args) < 1:\n parser.print_help()\n return 2\n\n # do stuff\n # This runs the program in the foreground. We tell the reactor to connect\n # over TCP using a given factory, and once the reactor is started, it will\n # open that connection.\n reactor.connectTCP(HOST, PORT, MyFirstIRCFactory(args))\n # Since we're running in the foreground anyway, show what's happening by\n # logging to stdout.\n log.startLogging(sys.stdout)\n # And this starts the reactor running. This call blocks until everything is\n # done, because this runs the whole twisted mainloop.\n reactor.run()",
"def main():\n if \"cli\" in sys.argv:\n run_cli_game()\n else:\n run_gui_game()",
"def main():\n Fire(cli)",
"def start(self):\n if self._start_event is None:\n _call_spawn_callbacks(self)\n hub = get_my_hub(self) # pylint:disable=undefined-variable\n self._start_event = hub.loop.run_callback(self.switch)",
"def main():\n\n\t# Initialize the node\n\trospy.init_node(\"node_action_server_ros_iot_bridge\")\n\n\t# Create a object for RosIotBridgeActionServer class\n\taction_server = RosIotBridgeActionServer()\n\n\t# Not letting this node die\n\trospy.spin()",
"def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()",
"def start(self):\n while True:\n #requests.get(\"http://localhost:8080/clear\")\n if use_launch_phrase:\n recognizer, audio = self.speech.listen_for_audio()\n if self.speech.is_call_to_action(recognizer, audio):\n self.__acknowledge_action()\n self.decide_action()\n else:\n self.decide_action()",
"def start(self):\n\t\tself.init_trajectory_gripper()\n\t\tself.gripperserver.start()\n\t\tprint(\"The action server for this driver has been started\")",
"def main():\n # Parse arguments for configuration and light type\n parser = argparse.ArgumentParser()\n parser.add_argument(\"light_type\", help=\"lifx or hue\", choices=['lifx', 'hue'], type = str.lower)\n parser.add_argument(\"-c\", \"--config_mode\", action='store_true', help=\"runs the client in config mode which prints out the light data\")\n \n args = parser.parse_args()\n \n config_mode = args.config_mode\n light_type = args.light_type\n \n # Get light information \n # *Note*\n # Only LIFX is supported at this point in time\n light_service = None\n if light_type == 'lifx':\n light_service = lightservice.LIFXLightService(\"https://api.lifx.com/v1/\")\n \n data = light_service.refresh_light_data(config_mode)\n \n button_handler = None\n if config_mode:\n button_handler = buttonhandler.ConfigButtonHandler()\n button_handler.start()\n else:\n button_handler = buttonhandler.ButtonHandler(data)\n button_handler.start(light_service)",
"def main():\r\n if check_argv():\r\n if len(sys.argv) == 3:\r\n gui = GUI(sys.argv[1], int(sys.argv[2]), True, ip=None)\r\n gui.create_board()\r\n gui.root.title(\"Server\")\r\n if not gui.is_human():\r\n gui.ai.find_legal_move(gui.game, gui.update_game)\r\n gui.run_game()\r\n elif len(sys.argv) == 4:\r\n ip = socket.gethostbyname(socket.gethostname())\r\n gui = GUI(sys.argv[1], int(sys.argv[2]), False, ip)\r\n gui.create_board()\r\n gui.root.title(\"Client\")\r\n if not gui.is_human():\r\n gui.ai.find_legal_move(gui.game, gui.update_game)\r\n gui.run_game()",
"def execute():\n command_line_args = argv[1:]\n args = cli(command_line_args)\n\n callback = args.callback\n kwargs = {\n k: v\n for k, v in args.__dict__.items()\n if k != \"callback\"\n }\n\n main(callback, **kwargs)",
"def start_game(self, **kwargs):\n\n success, info = self.gms.start_game(\n player=kwargs.get('player', 'x'),\n first_turn=raw_input('Would you like to go first? y/n\\n') == 'y'\n )\n if success:\n if info['status_code'] == core_constants.GAME_STATUS_HUMAN_MOVE_REQUIRED:\n print(self.gms.game.get_board_state_pretty())\n self.play_human_move()\n else:\n print(info['messages'][0])",
"def run_cli_game():\n # Set up game\n view = ConsoleView()\n game = GameEngine(view)\n\n # Game loop\n while not game.game_over:\n view.turn_started()\n\n # Get move to make from user and execute it\n move = input()\n print()\n \n execute_move(move, game, view)",
"def main(args):\n\n cocos.director.director.init(resizable=True)\n\n Conversation = wit_handler.Conversation()\n TextBox = text_box.TextBox(\n enter_function=Conversation.send_message\n )\n\n # Run a scene with our event displayers:\n cocos.director.director.run(cocos.scene.Scene(TextBox))",
"def run_server(self, GameState):\n pass",
"def start(self) -> None:\n\n while not self.stop_listening:\n if self.world_rank == 0:\n command = MDI_Recv_Command(self.comm)\n else:\n command = None\n if self.world_rank == 0:\n print(\"MDI command received: \" + str(command))\n\n # Search for this command in self.commands\n found_command = False\n for supported_command in self.commands:\n if not found_command and command == supported_command:\n # Run the function corresponding to this command\n self.commands[supported_command]()\n found_command = True\n if not found_command:\n raise Exception(\"Unrecognized command: \" + str(command))",
"def _start(args=None):\n options = _parse_args(args)\n main(**options)"
] | [
"0.61051023",
"0.597413",
"0.59535104",
"0.5894354",
"0.58022404",
"0.5720852",
"0.56565666",
"0.56241596",
"0.56227505",
"0.5545233",
"0.5524626",
"0.54708153",
"0.54685193",
"0.54664093",
"0.5453326",
"0.5451305",
"0.54465693",
"0.5406699",
"0.5384227",
"0.53778505",
"0.5373198",
"0.53630126",
"0.5355289",
"0.5341888",
"0.53399384",
"0.53392494",
"0.5337174",
"0.52947944",
"0.5291907",
"0.52859837"
] | 0.6006053 | 1 |
This is a private method that handles incoming messages from the websocket, passes the turn information to an agent's turn handler, and then passes the result back to the server. | def _on_message(ws, msg, turn_handler):
def x():
parsed = json.loads(msg)
player = parsed['player']
actions = parsed['actions']
state = parsed['state']
action = turn_handler(player, actions, state)
response = {"action":action}
ws.send(json.dumps(response))
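    # run the turn handler on a background thread so the websocket read loop is not blocked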
_thread.start_new_thread(x, ()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def on_message(self, wsobj, message):\n\n message = json.loads(message)\n\n # If needed, complete the websocket handshake\n if message[\"op\"] == \"C\":\n self.on_open(wsobj, message=message)\n\n # The next few lines ensure only gameplay related event for the\n # specified game are provided. Otherwise, ESPN's websockets include\n # noisy league-wide information.\n elif \"pl\" in message:\n if message[\"pl\"] != \"0\" and message[\"tc\"] == self.channel:\n decoded = self.decode_message(message)\n self.write_message(wsobj, decoded)",
"def on_message(self, ws, buf):\n if self.phase_auth:\n self.on_phase_auth_message(buf)\n return\n\n msg = self.__nanojsonrpc_unpack(buf)\n shot = msg['shot']\n\n if shot < 0:\n # Authenticate\n self.__ws_conn.send(self.__nanojsonrpc_pack('auth'))\n\n elif self.__shot_finished[shot]:\n # End\n pass\n\n elif shot in self.shot_threadings:\n # Forward\n self.__shot_inboxes[shot].put(msg)\n\n else:\n # If the shot does not exist, a 'worker' will be established.\n worker = UbqcClient(\n shot,\n self.__shot_inboxes[shot],\n self.shot_outbox,\n self.program,\n )\n worker.daemon = True\n\n self.shot_threadings[shot] = worker\n worker.start()\n self.__shot_inboxes[shot].put(msg)",
"def handleMessage(self, channels, sender, code, datagram):\n self.stateServer.handle(channels, sender, code, datagram)\n self.clientAgent.handle(channels, sender, code, datagram)\n self.databaseServer.handle(channels, sender, code, datagram)",
"def start(turn_handler):\n\n if os.environ.get('BOTBOX_SECRET'):\n print('Using env secret:', os.environ['BOTBOX_SECRET'])\n headers = {'Authorization': os.environ['BOTBOX_SECRET']}\n elif len(sys.argv) > 1:\n print('Using cli secret:', sys.argv[1])\n headers = {'Authorization': sys.argv[1]}\n else:\n print('Using no authentication')\n headers = []\n\n # get the URL for the server from an environment variable if it is set,\n # otherwise use the default localhost\n if os.environ.get('BOTBOX_SERVER'):\n url = (WS_SERVER_SCHEME + '://'\n + os.environ['BOTBOX_SERVER'] + ':' + WS_SERVER_PORT)\n else:\n url = WS_SERVER_SCHEME + '://' + WS_SERVER_URL + ':' + WS_SERVER_PORT\n\n print(\"Connecting to:\", url)\n\n ws = websocket.WebSocketApp(\n url,\n on_open = _on_open,\n on_message = lambda ws, msg: _on_message(ws, msg, turn_handler),\n on_error = _on_error,\n on_close = _on_close,\n header = headers\n )\n\n ws.run_forever()",
"def process_turn(self):\n data = self.bot.on_turn({'map': self.encode_map(), 'player_num': PLAYER_ID})\n for action in data.get('ACTIONS', []):\n f = getattr(self, action['action_type'].lower(), lambda **k: None)\n f(**action)",
"def handleMessage(msg):",
"async def receiver(self):\n socket_input = await self.websocket.recv()\n logger.debug(\"<<< Received:\\n{}\".format(socket_input))\n\n # Showdown sends this response on initial connection\n if socket_input == \"o\":\n logger.info(\"Connected on {}\".format(self.websocket_url))\n self.connected = True\n self.add_task(self.on_connect())\n return\n\n inputs = utils.parse_socket_input(socket_input)\n for room_id, inp in inputs:\n room_id = room_id or \"lobby\"\n logger.debug(\"||| Parsing:\\n{}\".format(inp))\n inp_type, params = utils.parse_text_input(inp)\n\n # Set challstr attributes and autologin\n if inp_type == \"challstr\":\n self.challengekeyid, self.challstr = params\n if self.name and self.password and self.autologin:\n await self.login()\n elif self.autologin:\n msg = (\n \"Cannot login without username and password. If \"\n \"you don't want your client to be logged in, \"\n \"you can use Client.start(autologin=False).\"\n )\n raise Exception(msg)\n\n # Process query response\n elif inp_type == \"queryresponse\":\n response_type, data = params[0], \"|\".join(params[1:])\n data = json.loads(data)\n self.add_task(\n self.on_query_response(response_type, data), transient=True\n )\n if response_type == \"savereplay\":\n self.add_task(\n self.server.save_replay_async(data), transient=True\n )\n\n # Challenge updates\n elif inp_type == \"updatechallenges\":\n self.challenges = json.loads(params[0])\n self.add_task(\n self.on_challenge_update(self.challenges), transient=True\n )\n\n # Messages\n elif inp_type == \"c:\" or inp_type == \"c\":\n timestamp = None\n if inp_type == \"c:\":\n timestamp, params = int(params[0]), params[1:]\n author_str, *content = params\n content = \"|\".join(content)\n chat_message = message.ChatMessage(\n room_id, timestamp, author_str, content, client=self\n )\n self.add_task(\n self.on_chat_message(chat_message), transient=True\n )\n elif inp_type == \"pm\":\n author_str, recipient_str, *content = params\n content = \"|\".join(content)\n private_message = message.PrivateMessage(\n author_str, recipient_str, content, client=self\n )\n self.add_task(\n self.on_private_message(private_message), transient=True\n )\n\n # Rooms\n elif inp_type == \"init\":\n room_type = params[0]\n room_obj = room.class_map.get(room_type, room.Room)(\n room_id, client=self, max_logs=self.max_room_logs\n )\n self.rooms[room_id] = room_obj\n self.add_task(self.on_room_init(room_obj), transient=True)\n elif inp_type == \"deinit\":\n if room_id in self.rooms:\n self.add_task(\n self.on_room_deinit(self.rooms.pop(room_id)),\n transient=True,\n )\n\n # add content to proper room\n if isinstance(self.rooms.get(room_id, None), room.Room):\n self.rooms[room_id].add_content(inp)\n\n self.add_task(\n self.on_receive(room_id, inp_type, params), transient=True\n )",
"def message_received_from_server(self, message):\n\n if message[\"type\"] == \"state\":\n self._board = message[\"board\"]\n self._winner = message[\"winner\"]\n elif message[\"type\"] == \"turn\":\n row, col = self._find_empty_cell()\n self.send_message_to_server({\n \"type\": \"move\",\n \"row\": row,\n \"column\": col\n })",
"def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))",
"def process_websocket(ws):\n try:\n yield from ws.receive()\n except aiohttp.errors.WSServerHandshakeError:\n pass",
"def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))",
"def handle(self, message: Message) -> None:\n self.handled_message = message\n envelope = Envelope(\n to=message.counterparty,\n sender=self.context.agent_address,\n protocol_id=TwoPartyNegotiationMessage.protocol_id,\n message=self.encoded_message_2_in_bytes,\n )\n self.context.outbox.put(envelope)",
"def handleMessage(self):\n try:\n # This whole block is wrapped in a try/except because the default\n # behaviour of the SimpleWebSocketServer library is to silently\n # discard any exceptions raised by these handlers. This is very\n # unhelpful. A workaround is to catch and log any exceptions\n # explicitly here.\n logging.debug(\"%s %s\", self.address, \"incoming message\")\n is_binary = not isinstance(self.data, str)\n if is_binary:\n message = self.data\n else:\n message = json.loads(self.data)\n\n self.logbook.messages_received.append(message)\n for response in self.get_responses(message, is_binary=is_binary):\n self.logbook.messages_sent.append(response)\n self.sendMessage(json.dumps(response).encode(\"utf-8\"))\n except Exception as exc: # pylint: disable=broad-except\n logging.exception(str(exc))\n self.close(status=1011, reason=\"Internal server error\")",
"async def _incoming_ws(self, pid, websocket):\n # websockets have a convenient __aiter__ interface, allowing\n # us to just iterate over the messages forever.\n # Under the hood, if there are no messages available from the\n # WebSocket, this code will yield and until another message is\n # received.\n\n # If the WebSocket is disconnected unexpectedly, the for loop\n # will produce an exception.\n try:\n async for msg in websocket:\n # Trim whitespace\n msg = msg.strip()\n # Make sure the message isn't an empty string\n if msg:\n # Pass the message onto the server's handler.\n self.on_player_msg(pid, msg)\n # If we get this error, then player probably just logged off.\n except websockets.exceptions.ConnectionClosed:\n pass\n finally:\n logging.debug(\"_incoming_ws closed for %s\", pid)",
"def handle(self, message):",
"def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client",
"def handle_message(self, message):",
"def gym_handle(ws):\n while True:\n message = ws.wait()\n if message is None: \n break\n message_handle(ws, message)",
"def _on_inbound_message(self, message):\n if message.channel.startswith(\"actuators/commands/\"):\n actuation = self.inbound_message_deserializer.deserialize_actuator_command(message)\n if actuation.command == ActuatorCommandType.ACTUATOR_COMMAND_TYPE_SET:\n self.actuation_handler.handle_actuation(actuation.reference, actuation.value)\n\n state, value = self.actuator_status_provider.get_actuator_status(actuation.reference)\n actuator_status = ActuatorStatus.ActuatorStatus(actuation.reference, state, value)\n\n outbound_message = self.outbound_message_factory.make_from_actuator_status(actuator_status)\n if not self.connectivity_service.publish(outbound_message):\n self.outbound_message_queue.put(outbound_message)\n elif actuation.command == ActuatorCommandType.ACTUATOR_COMMAND_TYPE_STATUS:\n state, value = self.actuator_status_provider.get_actuator_status(actuation.reference)\n\n actuator_status = ActuatorStatus.ActuatorStatus(actuation.reference, state, value)\n\n outbound_message = self.outbound_message_factory.make_from_actuator_status(actuator_status)\n if not self.connectivity_service.publish(outbound_message):\n self.outbound_message_queue.put(outbound_message)\n elif actuation.command == ActuatorCommandType.ACTUATOR_COMMAND_TYPE_UNKNOWN:\n print(\"Received unsupported actuation command\")\n\n else:\n print(\"Received unsupported message: \\n\" +\n message.channel + \"\\n\" + message.payload)",
"def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n threads.deferToThread(callback, *msg.pargs, **msg.kwargs)",
"def handle_message(self, data, channel):\n pass",
"def on_message(self,ws,message):\n pass",
"def process_incoming_message(self):\n\n # Get the webhook data\n post_data = request.json\n\n # Determine the Spark Room to send reply to\n room_id = post_data[\"data\"][\"roomId\"]\n\n # Get the details about the message that was sent.\n message_id = post_data[\"data\"][\"id\"]\n message = self.spark.messages.get(message_id)\n if self.DEBUG:\n sys.stderr.write(\"Message content:\" + \"\\n\")\n sys.stderr.write(str(message) + \"\\n\")\n\n # First make sure not processing a message from the bots\n # Needed to avoid the bot talking to itself\n # We check using IDs instead of emails since the email\n # of the bot could change while the bot is running\n # for example from bot@sparkbot.io to bot@webex.bot\n if message.personId in self.spark.people.me().id:\n if self.DEBUG:\n sys.stderr.write(\"Ignoring message from our self\" + \"\\n\")\n return \"\"\n\n # Log details on message\n sys.stderr.write(\"Message from: \" + message.personEmail + \"\\n\")\n\n # Find the command that was sent, if any\n command = \"\"\n for c in self.commands.items():\n if message.text.find(c[0]) != -1:\n command = c[0]\n sys.stderr.write(\"Found command: \" + command + \"\\n\")\n # If a command was found, stop looking for others\n break\n\n # Build the reply to the user\n reply = \"\"\n\n # Take action based on command\n # If no command found, send the default_action\n if command in [\"\"] and self.default_action:\n # noinspection PyCallingNonCallable\n reply = self.commands[self.default_action][\"callback\"](message)\n elif command in self.commands.keys():\n # noinspection PyCallingNonCallable\n reply = self.commands[command][\"callback\"](message)\n else:\n pass\n\n # allow command handlers to craft their own Spark message\n if reply and isinstance(reply, Response):\n reply.roomId = room_id\n reply = reply.as_dict()\n self.spark.messages.create(**reply)\n reply = \"ok\"\n elif reply:\n self.spark.messages.create(roomId=room_id, markdown=reply)\n return reply",
"def handle_message(self, msg, identity=None):\n\n if (self._supervisor and\n not isinstance(msg, mplane.model.Envelope)):\n self._exporter.put_nowait([msg, identity])\n\n if isinstance(msg, mplane.model.Capability):\n self._add_capability(msg, identity)\n elif isinstance(msg, mplane.model.Withdrawal):\n self._withdraw_capability(msg, identity)\n elif isinstance(msg, mplane.model.Receipt):\n self._handle_receipt(msg, identity)\n elif isinstance(msg, mplane.model.Result):\n self._handle_result(msg, identity)\n elif isinstance(msg, mplane.model.Exception):\n self._handle_exception(msg, identity)\n elif isinstance(msg, mplane.model.Envelope):\n if msg.get_token() in self._receipts:\n self._handle_result(msg, identity)\n else:\n for imsg in msg.messages():\n self.handle_message(imsg, identity)\n else:\n raise ValueError(\"Internal error: unknown message \"+repr(msg))",
"async def _handle_battle_message(self, split_messages: List[List[str]]) -> None:\n # Battle messages can be multiline\n if (\n len(split_messages) > 1\n and len(split_messages[1]) > 1\n and split_messages[1][1] == \"init\"\n ):\n battle_info = split_messages[0][0].split(\"-\")\n battle = await self._create_battle(battle_info)\n else:\n battle = await self._get_battle(split_messages[0][0])\n\n for split_message in split_messages[1:]:\n if len(split_message) <= 1:\n continue\n elif split_message[1] in self.MESSAGES_TO_IGNORE:\n pass\n elif split_message[1] == \"request\":\n if split_message[2]:\n request = orjson.loads(split_message[2])\n battle._parse_request(request)\n if battle.move_on_next_request:\n await self._handle_battle_request(battle)\n battle.move_on_next_request = False\n elif split_message[1] == \"win\" or split_message[1] == \"tie\":\n if split_message[1] == \"win\":\n battle._won_by(split_message[2])\n else:\n battle._tied()\n await self._battle_count_queue.get()\n self._battle_count_queue.task_done()\n self._battle_finished_callback(battle)\n async with self._battle_end_condition:\n self._battle_end_condition.notify_all()\n elif split_message[1] == \"error\":\n self.logger.log(\n 25, \"Error message received: %s\", \"|\".join(split_message)\n )\n if split_message[2].startswith(\n \"[Invalid choice] Sorry, too late to make a different move\"\n ):\n if battle.trapped:\n await self._handle_battle_request(battle)\n elif split_message[2].startswith(\n \"[Unavailable choice] Can't switch: The active Pokémon is \"\n \"trapped\"\n ) or split_message[2].startswith(\n \"[Invalid choice] Can't switch: The active Pokémon is trapped\"\n ):\n battle.trapped = True\n await self._handle_battle_request(battle)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't switch: You can't switch to an active \"\n \"Pokémon\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't switch: You can't switch to a fainted \"\n \"Pokémon\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: Invalid target for\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: You can't choose a target for\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: \"\n ) and split_message[2].endswith(\"needs a target\"):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif (\n split_message[2].startswith(\"[Invalid choice] Can't move: Your\")\n and \" doesn't have a move matching \" in split_message[2]\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Invalid choice] Incomplete choice: \"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif split_message[2].startswith(\n \"[Unavailable choice]\"\n ) and split_message[2].endswith(\"is disabled\"):\n battle.move_on_next_request = True\n elif split_message[2].startswith(\"[Invalid choice]\") and split_message[\n 2\n ].endswith(\"is disabled\"):\n battle.move_on_next_request = True\n elif split_message[2].startswith(\n \"[Invalid choice] Can't move: You sent more choices than unfainted\"\n \" Pokémon.\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n elif 
split_message[2].startswith(\n \"[Invalid choice] Can't move: You can only Terastallize once per battle.\"\n ):\n await self._handle_battle_request(battle, maybe_default_order=True)\n else:\n self.logger.critical(\"Unexpected error message: %s\", split_message)\n elif split_message[1] == \"turn\":\n battle._parse_message(split_message)\n await self._handle_battle_request(battle)\n elif split_message[1] == \"teampreview\":\n battle._parse_message(split_message)\n await self._handle_battle_request(battle, from_teampreview_request=True)\n elif split_message[1] == \"bigerror\":\n self.logger.warning(\"Received 'bigerror' message: %s\", split_message)\n else:\n battle._parse_message(split_message)",
"def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")",
"def _websocket_message(self, msg):\n if msg is None:\n self._logger.warn(\"Websocket server disconnected!\")\n if not self._disconnect_issued:\n if self._ws is not None:\n self._ws.close()\n self._ws = None\n yield self._connect(reconnecting=True)\n return\n try:\n msg = json.loads(msg)\n self._logger.debug(\"Message received: %s\", msg)\n msg_id = str(msg['id'])\n if msg_id.startswith('redis-pubsub'):\n self._process_redis_message(msg, msg_id)\n elif msg_id.startswith('redis-reconnect'):\n # only resubscribe to namespaces, the server will still\n # publish sensor value updates to redis because the client\n # did not disconnect, katportal lost its own connection\n # to redis\n yield self._resend_subscriptions()\n else:\n self._process_json_rpc_message(msg, msg_id)\n except Exception:\n self._logger.exception(\n \"Error processing websocket message! {}\".format(msg))\n if self._on_update:\n self._io_loop.add_callback(self._on_update, msg)\n else:\n self._logger.warn('Ignoring message (no on_update_callback): %s',\n msg)",
"def handle_turn(game_ID, team, action, payload):\n\n state = get_state(game_ID)\n if state[\"playerState\"][\"winner\"] != \"none\":\n return [\"playerState\"]\n if state[\"playerState\"][\"turn\"] != team or state[\"playerState\"][\"action\"] != action:\n raise InvalidTurnError(\n f'{state[\"playerState\"][\"action\"]} for {state[\"playerState\"][\"turn\"]} goes now'\n )\n if action == \"spymaster\":\n return spymaster_move(game_ID, payload[\"hint\"], payload[\"attempts\"])\n elif action == \"chooser\":\n return chooser_move(\n game_ID, state[\"wordsState\"], payload[\"guess\"], state[\"playerState\"][\"turn\"]\n )",
"def handle_handshake(self, message):\n message_type = messages.get_message_type(message)\n if message_type == \"OFPT_HELLO\":\n self.hello_received = True\n if message_type == \"OFPT_FEATURES_REPLY\":\n self.features_reply_received = True\n self.dpid = message.datapath_id\n if self.features_reply_received and self.hello_received:\n #print \"Switch on: %s:%s has the datapath ID: %s\" % (\n # self.address, self.port, self.dpid)\n if self.needs_migration:\n #print \"Migrating switch...\"\n self.handle_migration(message)\n else:\n self.activate_controller()\n self.controller.start_sending_to_switch()",
"def handle(self):\n for request in self._each_msg():\n r_len, r_type = struct.unpack_from('> I B', request)\n\n if r_type == self.SSH2_AGENTC_REQUEST_IDENTITIES:\n response = self._merge_identities(request)\n elif r_type == self.SSH2_AGENTC_SIGN_REQUEST:\n # Extract key blob from request\n key_blob_len = struct.unpack_from('> I', request, 5)[0]\n key_blob = request[9:9 + key_blob_len]\n hex_blob = ''.join('{:02x}'.format(b) for b in key_blob)\n\n agent = self._identity_map[hex_blob]\n\n if agent:\n if agent == self.server.alternate_agent:\n key_digest = self._key_digest(key_blob)\n LOG.info(\"identity %s used by %s: %s\", key_digest,\n self.username, self.process_info)\n\n response = agent.forward_request(request)\n else:\n response = \\\n self.server.default_agent.forward_request(request)\n else:\n response = self.server.default_agent.forward_request(request)\n\n self.request.sendall(response)"
] | [
"0.62253714",
"0.6034103",
"0.6009861",
"0.60036653",
"0.6001996",
"0.59676135",
"0.5915021",
"0.58753765",
"0.58735913",
"0.5867791",
"0.5857927",
"0.5834144",
"0.5820389",
"0.58074945",
"0.5798301",
"0.5788626",
"0.5782153",
"0.5777276",
"0.5736334",
"0.5656241",
"0.5642342",
"0.5607427",
"0.5601038",
"0.5595248",
"0.55904734",
"0.5582438",
"0.55785877",
"0.55758494",
"0.5565749",
"0.55592424"
] | 0.72230154 | 0 |
Tile an image to a given width and height. | def tile_image(
im: Image.Image, width: int, height: int, mode: Optional[str] = "RGB", **kwargs: Any
) -> Image.Image:
im_out = Image.new(mode, (width, height), **kwargs)
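    # number of copies needed to cover the target area, rounding up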
h_tiles = ceil(width / im.width)
v_tiles = ceil(height / im.height)
for i in range(v_tiles):
y = im.height * i
for j in range(h_tiles):
x = im.width * j
im_out.paste(im, box=(x, y))
return im_out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_image(self, image_location, width, height):\n tile_image = pygame.image.load(image_location).convert_alpha()\n # The tile is a square and the height is expected to be smaller than the width\n tile_width = width\n tile_height = height\n tile_image = pygame.transform.scale(tile_image, (tile_width, tile_height))\n\n # The self.image attribute expects a Surface, so we can manually create one and \"blit\" the tile image onto the surface (i.e. paint an image onto a surface).\n # We use list comprehension to quickly make the blits_data list of tuples (each tuple has the tile image, and the X and Y coordinates)\n # Don't know what list comprehension is? Go look it up on the Internet. That's what all professional software engineers do ;)\n image = pygame.Surface((width, height))\n blits_data = [(tile_image, (tile_width * i, 0)) for i in range(math.ceil(width / tile_width))]\n image.blits(blits_data)\n\n return image",
"def test_tiled():\n size = [25, 25]\n img = Image.new('RGB', (10, 10))\n img.putpixel((5, 5), (0, 255, 0))\n\n parameters = {'data': [img], 'size': size}\n\n tiled = images.tiled(parameters)\n\n assert_equal(tiled.size, tuple(size))\n assert_equal(tiled.getpixel((5, 5)), (0, 255, 0))\n assert_equal(tiled.getpixel((15, 5)), (0, 255, 0))",
"def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling",
"def tile(X, rows, cols):\n tiling = np.zeros((rows * X.shape[1], cols * X.shape[2], X.shape[3]), dtype = X.dtype)\n for i in range(rows):\n for j in range(cols):\n idx = i * cols + j\n if idx < X.shape[0]:\n img = X[idx,...]\n tiling[\n i*X.shape[1]:(i+1)*X.shape[1],\n j*X.shape[2]:(j+1)*X.shape[2],\n :] = img\n return tiling",
"def tile_image(im):\n r1 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n r2 = np.concatenate((im[:,::-1], im, im[:, ::-1]), 1)\n r3 = np.concatenate((im[::-1,::-1], im[::-1], im[::-1, ::-1]), 1)\n return(np.concatenate((r1, r2,r3), 0))",
"def stich(data, title=None):\n # Get name, list of tiles, width and height\n name = data[\"levels\"][0][\"name\"] \n tiles = data[\"levels\"][0][\"tiles\"]\n width = data[\"levels\"][0][\"width\"]\n height = data[\"levels\"][0][\"height\"]\n\n # Create the directory to place all the downloaded tiles in\n if title: #if title provided, name directory based on that\n dirname = title\n else: #if title not provided, generate a name\n dirname = name + str(width) + str(height)\n os.makedirs(dirname, exist_ok=True)\n os.chdir(dirname)\n\n #Create the empty image based on dimensions\n result = Image.new('RGB', (width, height))\n tile_size = None \n\n # actually get the tiles\n for i in tiles:\n image = get_tile(i['url']) #download image\n if not tile_size:\n tile_size = image.size[0] # on the first tile get the image size\n result.paste(im=image, box=(i['x'] * tile_size, i['y'] * tile_size)) # each tile has a number which isn't\n # it's cooridnate in pixels but it's order. \n # To get pixel coordinate just multiply by the size of each tile\n result.save('final.jpeg') # save file in directory\n os.chdir(os.path.join( os.path.dirname( __file__ ), '..' )) # then navigate back up to the base directory",
"def forward_tiled(self, image: numpy.ndarray, tile_size: int) -> numpy.ndarray:\n # Constant that only really gets repeated a ton here.\n context = 7\n context2 = context + context\n\n # Notably, numpy is used here because it makes this fine manipulation a lot simpler.\n # Scaling first - repeat on axis 2 and axis 3 (Y & X)\n image = image.repeat(2, 2).repeat(2, 3)\n\n # Resulting image buffer. This is made before the input is padded,\n # since the input has the padded shape right now.\n image_out = numpy.zeros(image.shape)\n\n # Padding next. Note that this padding is done on the whole image.\n # Padding the tiles would lose critical context, cause seams, etc.\n image = numpy.pad(image, [[0, 0], [0, 0], [context, context], [context, context]], mode = \"edge\")\n\n # Now for tiling.\n # The output tile size is the usable output from an input tile (tile_size).\n # As such, the tiles overlap.\n out_tile_size = tile_size - context2\n for out_y in range(0, image_out.shape[2], out_tile_size):\n for out_x in range(0, image_out.shape[3], out_tile_size):\n # Input is sourced from the same coordinates, but some stuff ought to be\n # noted here for future reference:\n # + out_x/y's equivalent position w/ the padding is out_x + context.\n # + The output, however, is without context. Input needs context.\n # + Therefore, the input rectangle is expanded on all sides by context.\n # + Therefore, the input position has the context subtracted again.\n # + Therefore:\n in_y = out_y\n in_x = out_x\n # not shown: in_w/in_h = tile_size (as opposed to out_tile_size)\n # Extract tile.\n # Note that numpy will auto-crop this at the bottom-right.\n # This will never be a problem, as tiles are specifically chosen within the padded section.\n tile = image[:, :, in_y:in_y + tile_size, in_x:in_x + tile_size]\n # Extracted tile dimensions -> output dimensions\n # This is important because of said cropping, otherwise it'd be interior tile size.\n out_h = tile.shape[2] - context2\n out_w = tile.shape[3] - context2\n # Process tile.\n tile_t = Tensor(tile)\n tile_fwd_t = self.forward(tile_t)\n # Replace tile.\n image_out[:, :, out_y:out_y + out_h, out_x:out_x + out_w] = tile_fwd_t.numpy()\n\n return image_out",
"def smaller(self):\n w1, h1 = float(self.imwidth), float(self.imheight)\n w2, h2 = float(self.__huge_size), float(self.__huge_size)\n aspect_ratio1 = w1 / h1\n aspect_ratio2 = w2 / h2 # it equals to 1.0\n if aspect_ratio1 == aspect_ratio2:\n image = Image.new('RGB', (int(w2), int(h2)))\n k = h2 / h1 # compression ratio\n w = int(w2) # band length\n elif aspect_ratio1 > aspect_ratio2:\n image = Image.new('RGB', (int(w2), int(w2 / aspect_ratio1)))\n k = h2 / w1 # compression ratio\n w = int(w2) # band length\n else: # aspect_ratio1 < aspect_ration2\n image = Image.new('RGB', (int(h2 * aspect_ratio1), int(h2)))\n k = h2 / h1 # compression ratio\n w = int(h2 * aspect_ratio1) # band length\n i, j, n = 0, 1, round(0.5 + self.imheight / self.__band_width)\n while i < self.imheight:\n print('\\rOpening image: {j} from {n}'.format(j=j, n=n), end='')\n band = min(self.__band_width, self.imheight - i) # width of the tile band\n self.__tile[1][3] = band # set band width\n self.__tile[2] = self.__offset + self.imwidth * i * 3 # tile offset (3 bytes per pixel)\n self.__image.close()\n self.__image = Image.open(self.path) # reopen / reset image\n self.__image.size = (self.imwidth, band) # set size of the tile band\n self.__image.tile = [self.__tile] # set tile\n cropped = self.__image.crop((0, 0, self.imwidth, band)) # crop tile band\n image.paste(cropped.resize((w, int(band * k) + 1), self.__filter), (0, int(i * k)))\n i += band\n j += 1\n print('\\r' + 30 * ' ' + '\\r', end='') # hide printed string\n return image",
"def _tile_image(self, data):\n image = Image.open(StringIO(data))\n return image.convert('RGBA')",
"def make_floor(self):\n\n for y in range(0, self.num_tiles[1] + 1):\n for x in range(0, self.num_tiles[0] + 1):\n offset = (x * self.tile.size[0], y * self.tile.size[1])\n self.image.blit(self.tile.image, offset)",
"def load_tile(path, tile_size):\n img = pyglet.resource.image(path)\n img.width = tile_size\n img.height = tile_size\n return img",
"def fit_image(self, img, width, height):\n if img.get_height()/height > img.get_width()/width:\n # scale is determined by width\n w = width\n h = int(math.ceil(img.get_height() * (w/img.get_width())))\n else:\n # scale is determined by height\n h = height\n w = int(math.ceil(img.get_width() * (h/img.get_height())))\n img = pygame.transform.smoothscale(img, (w,h))\n rect = img.get_rect()\n rect = rect.move((width-w)//2, (height-h)//2)\n img2 = pygame.Surface((width, height))\n img2.blit(img, rect)\n return img2",
"def tile_images(img, img_size=32, rows=4, cols=4, spacing=1):\n images = np.ones([3, rows * (img_size + spacing) - spacing, cols * (img_size + spacing)], dtype=np.float32)\n coords = [(i, j) for i in range(rows) for j in range(cols)]\n\n for (i, j), image in zip(coords, img):\n x = i * (img_size + spacing)\n y = j * (img_size + spacing)\n images[:, x: x+img_size, y:y+img_size] = image\n\n return images",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def test_image(filename, x_size=350, y_size=350):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n\n im.save(filename)",
"def img_to_tiles(cls, tiff_path, region, res, tile, tile_date_path, img_format, mp):\n\n # Get metadata from original image\n metadata = TiffMetadata(tiff_path)\n\n WIDTH, HEIGHT = region.calculate_width_height(res)\n ultra_large = False\n if WIDTH * HEIGHT > 2 * Image.MAX_IMAGE_PIXELS:\n ultra_large = True\n\n # Use the following dictionary to get the coordinates of each tile\n geoTran_d = TileUtils.getGeoTransform(tiff_path)\n\n # Check for valid tiling dimensions\n if (tile.width > WIDTH or tile.height > HEIGHT):\n raise argparse.ArgumentTypeError(\"Tiling dimensions greater than image dimensions\")\n\n # Determine the number of tiles per row and column\n if tile.handling == Handling.discard_incomplete_tiles:\n num_rows = (HEIGHT - tile.height * tile.overlap) // (tile.height * (1 - tile.overlap))\n num_cols = (WIDTH - tile.width * tile.overlap) // (tile.width * (1 - tile.overlap))\n else:\n num_rows = math.ceil((HEIGHT - tile.height * tile.overlap) / (tile.height * (1 - tile.overlap)))\n num_cols = math.ceil((WIDTH - tile.width * tile.overlap) / (tile.width * (1 - tile.overlap)))\n\n num_iterations = num_rows * num_cols\n \n # Find the pixel coordinate extents of each tile to be generated\n print(\"Gathering tiling information...\", end=\"\", flush=True)\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((metadata, tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols)), list(range(num_iterations)))\n pixel_coords = pool.map(getTilingSplitCoordsMP, args)\n else:\n pixel_coords = []\n for index in range(num_iterations):\n pixel_coords.append(getTilingSplitCoordsTuple(metadata,tile, WIDTH, HEIGHT, geoTran_d, tile_date_path, num_rows, num_cols, index))\n print(\"done!\")\n\n if mp:\n print(\"Generating {} tiles using {} processes...\".format(len(pixel_coords), NUM_CORES), flush=True)\n else:\n print(\"Generating {} tiles sequentially...\".format(len(pixel_coords)), flush=True)\n\n if ultra_large: \n # Create the intermediate tiles\n inter_dir, img_width, img_height = TileUtils.img_to_intermediate_images(tiff_path, tile, WIDTH, HEIGHT, metadata.date, img_format)\n\n # Add each coordinate to its proper list\n intermediate_files = [f for f in os.listdir(inter_dir) if f.endswith(img_format)]\n\n # Get the tiling information for all intermediate tiles\n intermediate_info = TileUtils.getIntermediateTilingInfo(tile, pixel_coords, WIDTH, HEIGHT, img_width, img_height, intermediate_files)\n\n # Tile the complete images\n print(\"\\tTiling from complete images\")\n for single_inter_imgs in tqdm(intermediate_info[0]):\n filename = single_inter_imgs[0][0]\n inter_metadata = IntermediateMetadata(filename)\n\n img_path = os.path.join(inter_dir, filename)\n src = Image.open(img_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n \n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format,), kwds={\"inter_x\":(x - inter_metadata.start_x), \"inter_y\":(y - inter_metadata.start_y)}) for (filename, x, y, done_x, done_y, path) in single_inter_imgs]\n f = [p.get() for p in multi]\n pool.close()\n pool.join()\n 
else: \n for filename, x, y, done_x, done_y, path in single_inter_imgs:\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, inter_x=(x - inter_metadata.start_x), inter_y=(y - inter_metadata.start_y), img_arr=img_arr)\n\n # Close the image\n src.close()\n # Tile in between two images\n print(\"\\tTiling between two images\")\n if mp:\n with Pool(processes=NUM_CORES) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[1])\n result = list(tqdm(pool.imap(processDoublesMP, args), total=len(intermediate_info[1])))\n else:\n for double_inter_imgs in tqdm(intermediate_info[1]):\n processDoublesTuple(tile.width, tile.height, inter_dir, img_format, double_inter_imgs)\n \n # Tile in between four images\n print(\"\\tTiling between four images\")\n if mp:\n # Use half as many processes as cores to ensure not running out of available mem and getting stuck\n with Pool(processes=(NUM_CORES // 2)) as pool:\n args = zip(repeat((tile.width, tile.height, inter_dir, img_format)), intermediate_info[2])\n result = list(tqdm(pool.imap(processQuadsMP, args), total=len(intermediate_info[2])))\n else:\n for quad_inter_imgs in tqdm(intermediate_info[2]):\n processQuadsTuple(tile.width, tile.height, inter_dir, img_format, quad_inter_imgs)\n shutil.rmtree(inter_dir)\n else: \n # Open image as a numpy array in order to tile from the array\n src = Image.open(tiff_path)\n img_arr = np.array(src)\n\n if mp:\n # Create a shared array\n X_shape = img_arr.shape\n X = RawArray('B', X_shape[0] * X_shape[1] * X_shape[2])\n\n # Wrap shared array as numpy array\n X_np = np.frombuffer(X, dtype='uint8').reshape(X_shape)\n\n # Copy image to the shared array\n np.copyto(X_np, img_arr)\n\n # Use multiprocessing to tile the numpy array\n with Pool(processes=NUM_CORES, initializer=init_worker, initargs=(X, X_shape, None, None)) as pool:\n multi = [pool.apply_async(TileUtils.generate_tile, args=(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format)) for (x, y, done_x, done_y, path) in pixel_coords]\n f = [p.get() for p in tqdm(multi)]\n pool.close()\n pool.join()\n else:\n for x, y, done_x, done_y, path in tqdm(pixel_coords):\n TileUtils.generate_tile(tile, WIDTH, HEIGHT, x, y, done_x, done_y, path, img_format, img_arr=img_arr)\n \n # Close the image\n src.close()\n print(\"done!\")",
"def test_image(filename, x_size=def_x_size, y_size=def_y_size):\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (random.randint(0, 255), # Red channel\n random.randint(0, 255), # Green channel\n random.randint(0, 255)) # Blue channel\n im.save(filename)",
"def _tile_image(self, data):\n image = Image.open(BytesIO(data))\n return image.convert('RGBA')",
"def __init__(self, group, image, x, y, tile_size):\n\t\tsuper().__init__(group, image, x, y, tile_size)",
"def tile(self, x: int, y: int):\n return self.awmap.tile(x, y)",
"def montage(images, w_sub, h_sub, step):\n target = Image.new('RGB', (w_sub*step, h_sub*step))\n left = 0\n right = w_sub\n for i in range(len(images)):\n top=(i//step)*h_sub\n target.paste(images[i], (left, top, right, top+h_sub))\n if(i//step < (i+1)//step):#Check if this row is done\n left = 0#Reset the position in a row\n right = w_sub\n else: #Next picture\n left += w_sub\n right += w_sub\n quality_value = 100\n return target",
"def image_to_tiles(img, tile_size):\n padding_argument = [(0,0),(0,0),(0,0)]\n for input_dim in [0,1]:\n padding_argument[input_dim] = (0, (tile_size - img.shape[input_dim]) % tile_size)\n img = np.pad(img, padding_argument, mode='constant')\n tiles = img.reshape((img.shape[0]//tile_size, \n tile_size,\n img.shape[1]//tile_size,\n tile_size,\n img.shape[2]\n )).swapaxes(1,2)\n return tiles",
"def process_tile(tile):\n global base_kwds, resampling, src\n # Get the bounds of the tile.\n ulx, uly = mercantile.xy(\n *mercantile.ul(tile.x, tile.y, tile.z))\n lrx, lry = mercantile.xy(\n *mercantile.ul(tile.x + 1, tile.y + 1, tile.z))\n\n kwds = base_kwds.copy()\n kwds['transform'] = from_bounds(ulx, lry, lrx, uly, 256, 256)\n src_nodata = kwds.pop('src_nodata', None)\n dst_nodata = kwds.pop('dst_nodata', None)\n\n with rasterio.open('/vsimem/tileimg', 'w', **kwds) as tmp:\n reproject(rasterio.band(src, src.indexes),\n rasterio.band(tmp, tmp.indexes),\n src_nodata=src_nodata,\n dst_nodata=dst_nodata,\n num_threads=1,\n resampling=resampling)\n\n data = bytearray(virtual_file_to_buffer('/vsimem/tileimg'))\n\n # Workaround for https://bugs.python.org/issue23349.\n if sys.version_info[0] == 2 and sys.version_info[2] < 10:\n # Check for backported bug fix before re-ordering\n\tif kwds['driver'] == 'PNG' and data[0:8] == png_header:\n # Properly constructed PNG, no need to re-order bytes\n pass\n\telif kwds['driver'] == 'JPEG' and data[0:4] == jpeg_header:\n # Properly constructed JPEG, no need to re-order bytes\n pass\n\telse:\n data[:] = data[-1:] + data[:-1]\n\n return tile, data",
"def fill_image(im):\n width, height = im.size\n # Select the larger value of the length and width of the original picture\n # as the radius of the nine palace grid of the new picture\n new_image_len = width if width > height else height\n # Create a white canvas\n new_image = Image.new(im.mode, (new_image_len, new_image_len), color=\"white\")\n # Paste the original image on the canvas at the center\n if width > height:\n new_image.paste(im, (0, int((new_image_len - height) / 2)))\n else:\n new_image.paste(im, (int((new_image_len - width) / 2), 0))\n return new_image",
"def slice_image(image, tile_size):\n height = image.shape[0]\n width = image.shape[1]\n assert height > tile_size and width > tile_size\n\n num_tiles_x, num_tiles_y = number_of_patches(width, height, tile_size)\n width, height = output_image_size(num_tiles_x, num_tiles_y, tile_size)\n\n # Crop image to new size\n image = image[:height, :width]\n\n tiles = np.zeros((num_tiles_y, num_tiles_x, tile_size, tile_size, 3))\n for i, ty in enumerate(range(0, height, tile_size)):\n for j, tx in enumerate(range(0, width, tile_size)):\n tiles[i, j] = image[ty : ty + tile_size, tx : tx + tile_size]\n\n return tiles",
"def image_tiles(bqsession, image_service_url, tile_size=64):\n dims = bqsession.fetchxml(image_service_url, dims='')\n x = int(dims.xpath('//tag[@name=\"image_num_x\"]')[0].attrib[ 'value'])\n y = int(dims.xpath('//tag[@name=\"image_num_y\"]')[0].attrib[ 'value'])\n \n for ix in range(int(x/tile_size)-1):\n for iy in range(int(y/tile_size)-1):\n yield bqsession.c.prepare_url(image_service_url, tile='0,%s,%s,%s' % (str(ix), str(iy), str(tile_size)))",
"def query_image_tile(self, coord):",
"def new_image(x, y, out, data):\n img = Image.new('RGB', (x, y))\n img.putdata(data)\n img.save(out)",
"def make_layers(self):\n w, h = self.image.get_size()\n shrink = pg.transform.smoothscale(self.image, (w//2, h//2))\n self.mid_image = tools.tile_surface((w,h), shrink, True)\n shrink = pg.transform.smoothscale(self.image, (w//4, h//4))\n self.base = tools.tile_surface(prepare.SCREEN_SIZE, shrink, True)"
] | [
"0.69168466",
"0.65878046",
"0.6285758",
"0.6285758",
"0.626664",
"0.62628806",
"0.619035",
"0.6130948",
"0.61250263",
"0.61242956",
"0.61224717",
"0.60801274",
"0.60514724",
"0.60491633",
"0.60491633",
"0.60491633",
"0.6043619",
"0.60361886",
"0.60337836",
"0.6016151",
"0.60026973",
"0.5998464",
"0.5982035",
"0.5976979",
"0.59613556",
"0.59522486",
"0.593237",
"0.59307677",
"0.59190404",
"0.5899575"
] | 0.78441113 | 0 |
Fetch an image from a given URL. | def fetch_image(url: str) -> Image.Image:
r = httpx.get(url)
    if r.status_code != httpx.codes.OK:
raise HTTPException(r.status_code, detail=r.reason_phrase)
f = BytesIO(r.content)
im = handle_image_file(f)
return im | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fetch_image(img_url):\n\n r = requests.get(img_url)\n return r.content",
"def _download_img_from_url(self, img_url):\r\n response = requests.get(img_url)\r\n img = Image.open(BytesIO(response.content))\r\n print(\"Downloaded image from url\")\r\n return img",
"def getImage(url):\n response = requests.get(url)\n img = Image.open(BytesIO(response.content))\n return img",
"def get_image_by_url(url):\n retry_count = 0\n while True:\n try:\n req_headers = {\"User-Agent\": DEFAULT_REQUEST_UA}\n r = requests.get(\n url, headers=req_headers, stream=True, timeout=DEFAULT_REQUEST_TIMEOUT\n )\n image_data = r.content\n if isinstance(image_data, bytes):\n image_data = BytesIO(image_data)\n else:\n image_data = StringIO(image_data)\n\n im = Image.open(image_data)\n return im\n except Timeout as e:\n if retry_count <= DEFAULT_REQUEST_RETRY:\n continue\n else:\n raise e\n except Exception as e:\n logging.exception(e)\n raise RequestException(e)",
"def download_pil_image(self, url):\r\n return Image.open(urlopen(url))",
"def get_img_from_url(index, url):\n try:\n with urllib.request.urlopen(url) as response:\n if response.headers.get_content_maintype() == 'image':\n image_filename = image_filename_prefix.format(name=image_class_name,\n counter=index,\n ext=response.headers.get_content_subtype())\n image_filepath = os.path.join(target_folder, image_filename)\n with open(image_filepath, 'wb') as image_file:\n image_file.write(response.read())\n\n print('Fetched URL {}'.format(index))\n\n except urllib.request.HTTPError:\n pass\n except Exception:\n pass",
"def download_image(url):\n buffer = BytesIO()\n download_from_url(url, buffer, pbar=False)\n buffer.seek(0)\n return Image.open(buffer)",
"def read_image(url):\n f = urllib2.urlopen(url)\n img = StringIO(f.read())\n return Image.open(img)",
"def getOrDownloadImageObject(self, url):\n \n if \"//\" in url:\n return self.downloadImage(url)\n else:\n return self.getPILFromPath(url)",
"def get_image(self, url):\n\n log(\"Getting image {}\".format(url))\n response = requests.get(url)\n if response.status_code == 200:\n image = self._pilimg.open(io.BytesIO(response.content))\n return image.convert('RGBA')\n return None",
"def urlToImage(url):\n\n response = requests.get(url)\n image = Image.open(BytesIO(response.content))\n return image",
"def downloadImage(self, url):\n req = urllib2.Request(url)\n response = urllib2.urlopen(req)\n data = response.read()\n io = cStringIO.StringIO(data)\n return PIL.Image.open(io)",
"def _url_to_image(url: str) -> Image.Image:\n assert url.lower().startswith(\"http\"), \"invalid url, must start with http\"\n content = requests.get(url).content\n image = Image.open(BytesIO(content))\n return image",
"def download_image(url):\n request = urllib.request.Request(\n url, headers={'Authorization': 'Bearer %s' % ACCESS_TOKEN})\n return urllib.request.urlopen(request).read()",
"def GET(self, url):\n try:\n f = open(url, 'r')\n image = f.read()\n f.close()\n except:\n\n db_module.resave_img(url[5:])\n\n f = open(url, 'r')\n image = f.read()\n f.close()\n\n return image",
"def get_image(url, path):\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"[>] get\", url, \">>\", path)\n f.close()",
"def download_image(url):\n request = urllib.request.Request(url, headers={'Authorization': 'Bearer %s' % BOT_TOKEN})\n return urllib.request.urlopen(request).read()",
"async def get_image(session, url):\n async with session.get(url) as resp:\n if resp.status != 200:\n logging.error(f'response status != 200 for image {url}')\n return None\n return await resp.read()",
"def joblib_read_img_url(url):\n\n from matplotlib.image import imread\n fd = urlopen(url, timeout=10)\n return imread(io.BytesIO(fd.read()))",
"def from_url(self) -> PngImagePlugin.PngImageFile:\n response = requests.get(self.url)\n img = Image.open(BytesIO(response.content))\n\n return img",
"def load_remote_image(image_url):\n response = requests.get(image_url, stream=True)\n img = Image.open(BytesIO(response.content))\n image = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)\n return image",
"def getResponseFromHttpRequest(url):\n try:\n response = HTTP.Request(url, headers = {'User-agent': USER_AGENT, 'Accept': 'image/jpeg'})\n return response\n except:\n Log.Debug('Error fetching URL: \"%s\".' % url)\n return None",
"def set_image_from_url(self, url: str):\n response = httpx.get(url)\n if response.status_code == 200:\n file = ContentFile(response.content)\n file.name = \"url-\" + shortuuid.uuid()\n self.image = file\n self.save()",
"def get_image(\n url: str\n) -> Union[Dict[str, Union[int, str, BytesIO, None]], None]:\n try:\n logger.info('downloading image: %s', url)\n r = requests.get(url)\n\n if r.status_code == 200:\n\n # loading binary data to mem\n img = BytesIO(r.content)\n\n # loading image to PIL\n pil_img = Image.open(img)\n\n # seek to 0\n img.seek(0)\n\n return {\n 'content-type': r.headers.get('Content-Type'),\n 'image': img,\n 'width': pil_img.width,\n 'height': pil_img.height,\n }\n\n raise Exception('wrong status code %s', r.status_code)\n\n except BaseException as e:\n logger.error('could not download and analyze img: %s', str(e))\n\n return None",
"def download_image(url, filename):\n r = requests.get(url)\n open(filename, 'wb').write(r.content)",
"def download_image(filename):\n return ImageApiHandler.image_handler.get(filename)",
"def url2img(url : str, timeout = 1) -> Image:\n\n response = requests.get(url, timeout = timeout)\n return Image.open(BytesIO(response.content))",
"def download(self, url):\n req = self.request(url)\n inputfile, outputfile = BytesIO(urlopen(req).read()), BytesIO()\n\n img = Image.open(inputfile)\n img = img.convert(\"RGB\") if img.mode != \"RGB\" else img\n img.thumbnail((192, 192), Image.ANTIALIAS)\n img.save(outputfile, \"JPEG\")\n\n self.image.save(os.path.basename(\n self._clean_url(url)),\n ContentFile(outputfile.getvalue()),\n save=False,\n )",
"def fetchImgOrDir(url, verboseLogs):\n try:\n resp = urllib.request.urlopen(url)\n except Exception as e:\n if verboseLogs:\n logging.error('Result of fetch from %s: %s', url, str(e))\n return (None, None)\n if resp.getheader('content-type') == 'image/jpeg':\n return ('img', resp)\n else:\n return ('dir', resp)",
"def get_content(url):\n img=requests.get(url).content\n return img"
] | [
"0.8507021",
"0.80610484",
"0.79975206",
"0.7953096",
"0.79118997",
"0.7878863",
"0.7840954",
"0.7823508",
"0.77485085",
"0.77150685",
"0.7658794",
"0.76583415",
"0.7470759",
"0.7440883",
"0.7416042",
"0.74151766",
"0.73907363",
"0.73734015",
"0.7324511",
"0.7323661",
"0.73060286",
"0.7284261",
"0.7235358",
"0.71439093",
"0.7028359",
"0.7026992",
"0.6953102",
"0.69311",
"0.6927938",
"0.6915745"
] | 0.8826489 | 0 |
Test case for command_trigger_webhook_post: launch a command via a Trigger | def test_command_trigger_webhook_post(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def test_webhook_endpoint_generates_telegram_command_event(\n hass: HomeAssistant,\n webhook_platform,\n hass_client: ClientSessionGenerator,\n update_message_command,\n) -> None:\n client = await hass_client()\n events = async_capture_events(hass, \"telegram_command\")\n\n response = await client.post(TELEGRAM_WEBHOOK_URL, json=update_message_command)\n assert response.status == 200\n assert (await response.read()).decode(\"utf-8\") == \"\"\n\n # Make sure event has fired\n await hass.async_block_till_done()\n\n assert len(events) == 1\n assert events[0].data[\"command\"] == update_message_command[\"message\"][\"text\"]",
"def trigger_build(self, postdata):\n pass",
"async def test_receive_post_ok(self):\n await self.webhook_connection.connect()\n assert self.webhook_connection.is_connected is True\n payload = {\"hello\": \"world\"}\n call_task = self.loop.create_task(self.call_webhook(\"test_topic\", json=payload))\n envelope = await asyncio.wait_for(self.webhook_connection.receive(), timeout=10)\n\n assert envelope\n\n message = cast(HttpMessage, envelope.message)\n dialogue = self.skill_dialogues.update(message)\n assert dialogue is not None\n assert message.method.upper() == \"POST\"\n assert message.body.decode(\"utf-8\") == json.dumps(payload)\n await call_task",
"def command_webhook(request):\n print(json.dumps(request.POST.copy(), indent=2))\n\n return JsonResponse({\"text\": \"ChangeTip services have been discontinued. See https://www.reddit.com/r/changetip/comments/5dn3rc/changetip_shutting_down/ Please close your account and disconnect ChangeTip from Slack.\"})\n\n if request.POST.get(\"noop\"):\n return JsonResponse({\"text\": \"Hi!\"})\n\n # Separated so we can still support the legacy webhook integration\n if 'command' in request.POST.keys():\n return slash_command(request)\n else:\n return outgoing_webhook(request)",
"def handle_post(self, api, command):\n return self._make_request_from_command('POST', command)",
"def test_create_trigger_with_curl(command_curl, test_rma_url, test_cma_creds):\n with open(\"etc/trigger.json\", \"r\") as json_file:\n data = json.load(json_file)\n cmd = [\n command_curl,\n \"--anyauth\",\n \"--user\",\n test_cma_creds,\n \"-v\",\n \"-H\",\n \"Content-type: application/json\",\n \"-d\",\n json.dumps(data),\n f\"{test_rma_url}/databases/kerndaten/triggers?format=json\",\n ]\n curl_result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)\n print(curl_result.returncode)\n print(curl_result.stdout)\n assert curl_result.returncode == 0\n\n # sanity check trigger has been created\n assert check_resource_exists(\n test_cma_creds, test_rma_url, \"databases/kerndaten/triggers/only-one-crawler\"\n )",
"async def trigger_build(self, *, branch=None, message=None):",
"def test_post_hooks(self):\n os.makedirs('/tmp/localhost/pacha_post')\n touch_script = open('/tmp/localhost/pacha_post/bar.sh', 'w')\n touch_script.write('''touch /tmp/localhost/post_got_executed.txt''')\n touch_script.close()\n run = rebuild.Rebuild(hostname='localhost') \n run.post_hooks()\n self.assertTrue(os.path.isfile('/tmp/localhost/post_got_executed.txt'))",
"def handle_github_webhook():\n\n verify_signature(request)\n logger.info(\"Received webhook\")\n\n if should_deploy(request):\n schedule_deploy()\n\n return \"\"",
"def git_webhook():\n client = MongoClient(os.getenv('MONGODB_URI', 'mongodb://localhost:27017'))\n database = client.get_database()\n content = {\n \"event\": request.headers['X-GitHub-Event'],\n \"payload\" : request.json,\n \"date\": datetime.utcnow()\n }\n log.info(\"Content Received - \", request.headers['X-GitHub-Delivery'])\n inserted_id = database.events.insert_one(content).inserted_id\n log.info(\"Content Inserted - \", inserted_id)\n return jsonify({\n \"message\": \"Okay!\"\n })",
"def run_trigger_command(self, workdir: str, args: argparse.Namespace):\n for response_line in self.stub.exec_command(\n on_device_tests_gateway_pb2.OnDeviceTestsCommand(\n workdir=workdir,\n token=args.token,\n test_type=args.test_type,\n platform=args.platform,\n archive_path=args.archive_path,\n config=args.config,\n tag=args.tag,\n labels=args.label,\n builder_name=args.builder_name,\n change_id=args.change_id,\n build_number=args.build_number,\n loader_platform=args.loader_platform,\n loader_config=args.loader_config,\n version=args.version,\n dry_run=args.dry_run,\n dimension=args.dimension or [],\n unittest_shard_index=args.unittest_shard_index,\n test_attempts=args.test_attempts,\n retry_level=args.retry_level,\n )):\n\n print(response_line.response)",
"def test_issue_post_issue_reaction(self):\n pass",
"def test_webhook_build_success(self):\n payload = json.dumps({\n 'matrix': [\n {\n 'config': {\n 'env': [\n 'REVIEWBOARD_STATUS_UPDATE_ID=%d'\n % self.status_update.pk,\n 'REVIEWBOARD_TRAVIS_INTEGRATION_CONFIG_ID=%d'\n % self.config.pk,\n ],\n },\n },\n ],\n 'build_url': 'https://example.com/build',\n 'state': 'passed',\n })\n self.spy_on(TravisCIWebHookView._validate_signature,\n owner=TravisCIWebHookView,\n call_fake=lambda self, request, integration_config: True)\n\n rsp = self.client.post(self.webhook_url, {'payload': payload})\n\n self.assertEqual(rsp.status_code, 200)\n\n self.status_update = StatusUpdate.objects.get(pk=self.status_update.pk)\n self.assertEqual(self.status_update.url, 'https://example.com/build')\n self.assertEqual(self.status_update.state,\n StatusUpdate.DONE_SUCCESS)",
"def _send_post_request(self, item):\n tc_name = get_tcname(item)\n try:\n env_prop = item.config.env.env_prop\n except AttributeError:\n buildname = self.UNDEFINED_BUILD\n else:\n buildname = self.buildname(env_prop)\n suite_name = get_suite_name(item.nodeid)\n info = {\"brief\": get_brief(item, tc_name), \"description\": get_steps(item, tc_name)}\n\n if self.post_queue:\n self._send_post_queue(item, buildname)\n self.server_cmd(\"post\", [self.self_name, buildname, suite_name, tc_name, \"Run\", \"\", info, self._get_build_info(item)])",
"def at_post_cmd(self):\n pass",
"def Trigger(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('trigger', payload=payload, response_object=None)",
"def test_valid_webhook(self, mock_send):\n send_notification(\"valid_webhook\", self.message)\n mock_send.assert_called()",
"def build_trigger(ctx, build_type_id, branch, comment, parameter, agent_id,\n open_build_log, wait_for_run):\n parameters = dict([p.split('=', 1) for p in parameter])\n data = ctx.obj.trigger_build(\n build_type_id=build_type_id,\n branch=branch,\n comment=comment,\n parameters=parameters,\n agent_id=agent_id)\n build_id = data['id']\n ctx.invoke(build_queue_show, args=[build_id])\n if open_build_log:\n url = data['webUrl'] + '&tab=buildLog'\n webbrowser.open(url)\n if not wait_for_run:\n return\n while data['state'] == 'queued':\n data = ctx.obj.get_queued_build_by_build_id(build_id)\n click.echo('state: %s' % data['state'])\n time.sleep(1)\n ctx.invoke(build_queue_show, args=[build_id])",
"def slackbuild_webhook(req: Request):\n global config\n global slack\n global cloudbuild\n\n # slack submits a POST\n if req.method != \"POST\":\n return abort(405)\n\n # not a true request from slack\n verified, err = slack.verify_webhook(req)\n if not verified:\n print(err)\n return abort(403)\n\n body = Slack.parse_request(req)\n argv = Slack.parse_command(body)\n msg = \"\"\n\n output, success = Command.run(argv, cloudbuild, config)\n\n if output is None:\n if success:\n # intentionaly not responding with a slack message\n return ('', 200)\n else:\n return abort(500)\n elif Slack.is_interactive_message(body):\n msg = slack.render_interactive_message(body, success, output)\n else:\n color = Colors.SUCCESS if success else Colors.FAILURE\n msg = slack.render_message({\"result\": output, \"color\": color}, \"command.json\")\n\n msg = json.dumps(msg)\n print(msg)\n return Response(response=msg, content_type=\"application/json\")",
"def webhook_sender(url=WEBHOOK_URL):\n data = runner()\n print(json.dumps(data))\n try:\n r = requests.post(url,json=data)\n print(r)\n except requests.exceptions.RequestException as e:\n raise SystemExit(e)",
"async def test_webhook_endpoint_generates_telegram_callback_event(\n hass: HomeAssistant,\n webhook_platform,\n hass_client: ClientSessionGenerator,\n update_callback_query,\n) -> None:\n client = await hass_client()\n events = async_capture_events(hass, \"telegram_callback\")\n\n response = await client.post(TELEGRAM_WEBHOOK_URL, json=update_callback_query)\n assert response.status == 200\n assert (await response.read()).decode(\"utf-8\") == \"\"\n\n # Make sure event has fired\n await hass.async_block_till_done()\n\n assert len(events) == 1\n assert events[0].data[\"data\"] == update_callback_query[\"callback_query\"][\"data\"]",
"def send(self):\n payload = self.format_payload()\n\n # Makes sure that the required fields are provided before\n # sending the payload.\n if not self.webhook_url:\n print ('Error: Webhook URL is required.')\n\n elif not payload:\n print ('Error: Message payload cannot be empty.')\n\n else:\n try:\n request = requests.post(self.webhook_url,\n data=json.dumps(payload),\n headers={'Content-Type': 'application/json'})\n\n request.raise_for_status()\n\n except requests.exceptions.RequestException as error:\n print('Error: %s' % error)",
"def test_bot_triggered_event(self):\n lh = LambdaHandler(\"tests.test_bot_handler_being_triggered\")\n # from : https://docs.aws.amazon.com/lambda/latest/dg/eventsources.html#eventsources-lex\n event = {\n \"messageVersion\": \"1.0\",\n \"invocationSource\": \"DialogCodeHook\",\n \"userId\": \"user-id specified in the POST request to Amazon Lex.\",\n \"sessionAttributes\": {\n \"key1\": \"value1\",\n \"key2\": \"value2\",\n },\n \"bot\": {\"name\": \"bot-name\", \"alias\": \"bot-alias\", \"version\": \"bot-version\"},\n \"outputDialogMode\": \"Text or Voice, based on ContentType request header in runtime API request\",\n \"currentIntent\": {\n \"name\": \"intent-name\",\n \"slots\": {\n \"slot-name\": \"value\",\n \"slot-name\": \"value\",\n \"slot-name\": \"value\",\n },\n \"confirmationStatus\": \"None, Confirmed, or Denied (intent confirmation, if configured)\",\n },\n }\n\n response = lh.handler(event, None)\n\n self.assertEqual(response, \"Success\")",
"def test_workflows_post(self):\n pass",
"def test_slackP_send(get_slackpost, capsys):\n s = get_slackpost\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out",
"def test_slackWH_send_good(get_slackwebhook, capsys):\n s = get_slackwebhook\n s.send()\n out, err = capsys.readouterr()\n assert \"Message sent\" in out",
"def POST(self):\n\t\t\n\t\tjson_data = web.data()\t\t# Get the POST data sent from Webex Teams\n\t\t#print(\"\\nWEBHOOK POST RECEIVED:\")\n\t\t#print(json_data, \"\\n\")\n\n\t\twebhook_obj = Webhook(json_data)\t\t\t\t\t# Create a Webhook object from the JSON data\n\t\troom = api.rooms.get(webhook_obj.data.roomId)\t\t# Get the room details\n\t\tmessage = api.messages.get(webhook_obj.data.id)\t\t# Get the message details\n\n\t\t# Ignore messages bot itself sent\n\t\tif message.personId == me.id:\n\t\t\treturn 'OK'\n\t\telse:\t# Message was sent by someone else; parse message and respond.\n\t\t\tperson = api.people.get(message.personId)\t\t\t# Get the sender's details\n\t\t\t\n\t\t\tprint(\"NEW MESSAGE IN ROOM '{}'\".format(room.title))\n\t\t\tprint(\"FROM '{}'\".format(person.displayName))\n\t\t\tprint(\"MESSAGE '{}'\\n\".format(message.text))\n\n\t\t\t#Test message sent\n\t\t\t#response = 'Message received {}'.format(mention(person.emails[0]))\t\t\n\t\t\t#api.messages.create(room.id, markdown=response)\n\t\t\tactionSelector(api, message, teams)\t\t\t\t#Depending on message defines action to perform\t\t\n\t\t\t\n\t\treturn 'OK'",
"def test_postMessage(self): #GIVEN the appropriate environment variables are configured\n testBot = bot.Bot(os.environ['bot_id'], os.environ['token'], os.environ['group_ID'])\n status = testBot.postMessage('Zygium') #WHEN the bot posts a message\n self.assertTrue(status == 202) # a status code of 202 should be returned",
"def post(self):\n send_slack_log('Entered /slack/post_msg')\n send_slack_log('Request info:')\n send_slack_log(str(request.form))\n # unknown request.form\n trigger_id = request.form['trigger_id']\n channel_id = request.form['channel_id']\n response = open_form(channel_id,\n trigger_id,\n config['slack_post_form_path'])\n send_slack_log('Response info:')\n send_slack_log(str(response))\n return 'Please enter the new msg information in the form'",
"def test_user_actions_post(self):\n pass"
] | [
"0.6756358",
"0.63620687",
"0.6353586",
"0.6080397",
"0.60362595",
"0.5910809",
"0.59053904",
"0.58640003",
"0.58217233",
"0.57251173",
"0.5721428",
"0.5700421",
"0.5697234",
"0.5693077",
"0.56879246",
"0.5658715",
"0.5656193",
"0.5644521",
"0.5596793",
"0.5591541",
"0.55492896",
"0.5533883",
"0.55296797",
"0.5521012",
"0.54766965",
"0.54514354",
"0.54494166",
"0.5446674",
"0.54450625",
"0.54301065"
] | 0.88001287 | 0 |
Checks if the specified operation is allowed on the resource. | def _operation_allowed(self, headers_dict, operation):
if 'allow' in headers_dict:
if operation in headers_dict['allow']:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_allowed(self, role, operation, resource):\r\n assert not role or role in self._roles\r\n assert not resource or resource in self._resources\r\n\r\n roles = set(get_family(self._roles, role))\r\n operations = set([None, operation])\r\n resources = set(get_family(self._resources, resource))\r\n\r\n is_allowed = None\r\n default_assertion = lambda *args: True\r\n\r\n for permission in itertools.product(roles, operations, resources):\r\n if permission in self._denied:\r\n assertion = self._denied[permission] or default_assertion\r\n if assertion(self, role, operation, resource):\r\n return False # denied by rule immediately\r\n\r\n if permission in self._allowed:\r\n assertion = self._allowed[permission] or default_assertion\r\n if assertion(self, role, operation, resource):\r\n is_allowed = True # allowed by rule\r\n\r\n return is_allowed",
"def supports_operation(self, operation: str) -> bool:\n return True",
"def supports_operation(self, operation: str) -> bool:\n return operation in OPERATION_SUPPORT_BY_TYPE[self.backing_type]",
"def is_any_allowed(self, roles, operation, resource):\r\n is_allowed = None # there is not matching rules\r\n for role in roles:\r\n is_current_allowed = self.is_allowed(role, operation, resource)\r\n if is_current_allowed is False:\r\n return False # denied by rule\r\n elif is_current_allowed is True:\r\n is_allowed = True\r\n return is_allowed",
"def _is_valid_fetch_operation(operation):\n if operation in FetchQuantity._supported_fetch_operations():\n return True\n else:\n return False",
"def permit_required(self):\n return \"permission\" in self.description.lower()",
"async def contains(self, operation: Operation) -> bool:\n return operation.instance_name in self.operations",
"def check_zone_operation(self, zone, operation):\n assert is_valid_zone(zone), zone\n return self.call_api('/zones/%s/operations/%s' % (zone, operation))",
"def check_rights(self, resources, request=None):\r\n if not self.auth:\r\n return True\r\n\r\n try:\r\n if not self.auth.test_rights(resources, request=request):\r\n raise AssertionError()\r\n\r\n except AssertionError, e:\r\n raise HttpError(\r\n \"Access forbiden. {0}\".format(e),\r\n status=status.HTTP_403_FORBIDDEN\r\n )",
"def has_access(self, action_name: str, resource_name: str, user=None) -> bool:\n if not user:\n user = g.user\n if (action_name, resource_name) in user.perms:\n return True\n\n if self.is_dag_resource(resource_name):\n if (action_name, permissions.RESOURCE_DAG) in user.perms:\n return True\n return (action_name, resource_name) in user.perms\n\n return False",
"def acl_check_entity(self, entity, auth_context, op, obj):\n acl_check = (\n entity.acl_check(auth_context, op, obj)\n if entity.has_acl()\n else self.default_acl.acl_check(auth_context, op, obj))\n if not acl_check:\n raise exceptions.AclError(\n 'unauthorized change to %s' % (\n entity.name,))",
"def check_allowed(self):\n if self.state_model.op_state in [\n DevState.FAULT,\n DevState.UNKNOWN,\n DevState.DISABLE,\n ]:\n return False\n\n return True",
"def allow(self, role, operation, resource, assertion=None):\r\n assert not role or role in self._roles\r\n assert not resource or resource in self._resources\r\n self._allowed[role, operation, resource] = assertion",
"def check_action_permissions(self, request, action, obj=None):\n if action is None:\n self.permission_denied(request)\n\n for permission in self.get_permissions():\n if not permission.has_action_permission(request, self, action, obj):\n self.permission_denied(request)",
"def check_permission(self, operation, resource, **exception_kwargs):\r\n exception = exception_kwargs.pop(\"exception\", PermissionDenied)\r\n checker = functools.partial(self._docheck, operation=operation,\r\n resource=resource)\r\n return PermissionContext(checker, exception, **exception_kwargs)",
"def test_allow(self) -> None:\n response = self.request(\"/\", method=\"HEAD\")\n self.assert_allowed(response, (\"GET\", \"POST\"))",
"def __CheckOpen(self, operation):\n if self.__closed:\n raise ValueError('%s() on a closed stream is not permitted' %\n operation)",
"def __CheckOpen(self, operation):\n if self.__closed:\n raise ValueError('%s() on a closed stream is not permitted' %\n operation)",
"def has_permission(self, request, view):\n if settings.ENHANCED_ORG_ADMIN and request.user.admin:\n return True\n\n if not request.user.access:\n return False\n\n if request.method in permissions.SAFE_METHODS:\n rates_read = request.user.access.get(\"cost_model\", {}).get(\"read\", [])\n if rates_read:\n return True\n else:\n rates_write = request.user.access.get(\"cost_model\", {}).get(\"write\", [])\n if \"*\" in rates_write:\n return True\n if self.get_uuid_from_url(request) in rates_write:\n return True\n return False",
"def validate(self, raw_op):\n log.info(\"validating @%s op %s\", self.actor, raw_op)\n\n try:\n # validate basic structure\n self._validate_raw_op(raw_op)\n self.action = raw_op[0]\n self.op = raw_op[1]\n self.actor_id = Accounts.get_id(self.actor)\n\n # validate and read schema\n self._read_schema()\n\n # validate permissions\n self._validate_permissions()\n\n self.valid = True\n\n except AssertionError as e:\n payload = str(e)\n Notify('error', dst_id=self.actor_id,\n when=self.date, payload=payload).write()\n\n return self.valid",
"def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0",
"def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0",
"def is_allowed(self) -> bool:\n return self.effect == ALLOW_ACCESS",
"def is_allowed_to_do(cls, db_tuple, action, target, actor, should_raise_insufficent_priv_ex=True):\n action_check_fn = cls.get_action_check_fn(action)\n \n if action_check_fn is None:\n raise cls.UnrecognizedActionException('unrecognized action: %s' % action)\n \n # i do what i want!\n if actor.metaspace_privileges.has_privilege(MetaspacePrivilegeSet.SUPER):\n return True\n \n can_do_action = action_check_fn(db_tuple, target, actor)\n if should_raise_insufficent_priv_ex and not can_do_action:\n raise cls.InsufficientPrivilegesException('%s (user_id=%i) is not allowed to perform %s' % (actor.email_addr, actor.user_id, action))\n else:\n return can_do_action",
"def check_permission(user, action_name, app_label, model_name):\r\n p = '%s.%s_%s' % (app_label, action_name, model_name)\r\n return user and user.is_active and user.has_perm(p)",
"async def permission_valid_check(cls):\n pass",
"def isOp(self):\n return True",
"def can_retry(self, opname):\n\n if self.retry_deny and opname in self.retry_deny:\n return False\n\n if self.retry_allow and opname not in self.retry_allow:\n return False\n\n return True",
"def is_Scan_allowed(self):\n handler = self.get_command_object(\"Scan\")\n return handler.check_allowed()",
"def mask_pass(owned_permissions: int, requested_operation: int,) -> bool:\n return bool(owned_permissions & requested_operation)"
] | [
"0.735948",
"0.72206527",
"0.68997526",
"0.6572741",
"0.64294475",
"0.63755685",
"0.63672787",
"0.6200207",
"0.61940044",
"0.61875075",
"0.6171609",
"0.61679983",
"0.60811335",
"0.607854",
"0.604403",
"0.60305333",
"0.60210794",
"0.60210794",
"0.60156304",
"0.6002486",
"0.59915817",
"0.59915817",
"0.599041",
"0.59650993",
"0.5944636",
"0.59318346",
"0.5909353",
"0.59068096",
"0.5903858",
"0.58919746"
] | 0.77897847 | 0 |
Parse the ExtendedError object and return the message. Build a list of decoded messages from the extended_error using the message registries. An ExtendedError JSON object is a response from the iLO with its own schema. This function knows how to parse the ExtendedError object and, using any loaded message registries, render an array of plain language strings that represent the response. | def _render_extended_error_message_list(self, extended_error):
messages = []
if isinstance(extended_error, dict):
if ('Type' in extended_error and
extended_error['Type'].startswith('ExtendedError.')):
for msg in extended_error['Messages']:
message_id = msg['MessageID']
x = message_id.split('.')
registry = x[0]
msgkey = x[len(x) - 1]
# if the correct message registry is loaded,
# do string resolution
if (registry in self.message_registries and msgkey in
self.message_registries[registry]['Messages']):
rmsgs = self.message_registries[registry]['Messages']
msg_dict = rmsgs[msgkey]
msg_str = message_id + ': ' + msg_dict['Message']
for argn in range(0, msg_dict['NumberOfArgs']):
subst = '%' + str(argn+1)
m = str(msg['MessageArgs'][argn])
msg_str = msg_str.replace(subst, m)
if ('Resolution' in msg_dict and
msg_dict['Resolution'] != 'None'):
msg_str += ' ' + msg_dict['Resolution']
messages.append(msg_str)
else:
# no message registry, simply return the msg object
# in string form
messages.append(str(message_id))
return messages | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_extended_error(self, extended_error):\n return self._render_extended_error_message_list(extended_error)",
"def _get_resp_body_errors(self):\n\n if self._resp_body_errors and len(self._resp_body_errors) > 0:\n return self._resp_body_errors\n\n errors = []\n warnings = []\n resp_codes = []\n\n if self.verb is None:\n return errors\n\n dom = self.response.dom()\n if dom is None:\n return errors\n\n for e in dom.findall('Errors'):\n eSeverity = None\n eClass = None\n eShortMsg = None\n eLongMsg = None\n eCode = None\n\n try:\n eSeverity = e.findall('SeverityCode')[0].text\n except IndexError:\n pass\n\n try:\n eClass = e.findall('ErrorClassification')[0].text\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n except IndexError:\n pass\n\n try:\n eShortMsg = smart_encode(e.findall('ShortMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eLongMsg = smart_encode(e.findall('LongMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n if int(eCode) not in resp_codes:\n resp_codes.append(int(eCode))\n except IndexError:\n pass\n\n msg = str(\"Class: {eClass}, Severity: {severity}, Code: {code}, {shortMsg} {longMsg}\") \\\n .format(eClass=eClass, severity=eSeverity, code=eCode, shortMsg=eShortMsg,\n longMsg=eLongMsg)\n\n # from IPython import embed; embed()\n\n if eSeverity == 'Warning':\n warnings.append(msg)\n else:\n errors.append(msg)\n\n self._resp_body_warnings = warnings\n self._resp_body_errors = errors\n self._resp_codes = resp_codes\n\n if self.config.get('warnings') and len(warnings) > 0:\n log.warn(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(warnings)))\n\n if self.response.reply.Ack == 'Failure':\n if self.config.get('errors'):\n log.error(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(errors)))\n\n return errors\n\n return []",
"def testExtendedErrorMessage(self):\n\n json_message = current.xml.json_message\n\n msg = json_message(False, 405, message=\"Test\")\n msg = json.loads(msg)\n self.assertEqual(len(msg), 3)\n self.assertEqual(msg[\"status\"], \"failed\")\n self.assertEqual(msg[\"statuscode\"], \"405\")\n self.assertEqual(msg[\"message\"], \"Test\")",
"def load_xcat_resp(message):\n resp_list = jsonloads(message)['data']\n keys = ('info', 'data', 'node', 'errorcode', 'error')\n\n resp = {}\n\n for k in keys:\n resp[k] = []\n\n for d in resp_list:\n for k in keys:\n if d.get(k) is not None:\n resp[k].append(d.get(k))\n\n err = resp.get('error')\n if err != []:\n for e in err:\n if _is_warning(str(e)):\n # ignore known warnings or errors:\n continue\n else:\n raise ZVMException(message)\n\n _log_warnings(resp)\n\n return resp",
"def json(self):\n d = [err.json for err in self.errors]\n return d",
"def odata_error(self, request, environ, start_response, sub_code,\n message='', code=400):\n response_headers = []\n e = core.Error(None)\n e.add_child(core.Code).set_value(sub_code)\n e.add_child(core.Message).set_value(message)\n response_type = self.content_negotiation(\n request, environ, self.ErrorTypes)\n if response_type is None:\n # this is an error response, default to text/plain anyway\n response_type = params.MediaType.from_str(\n 'text/plain; charset=utf-8')\n elif response_type == \"application/atom+xml\":\n # even if you didn't ask for it, you get application/xml in this\n # case\n response_type = \"application/xml\"\n if response_type == \"application/json\":\n data = str(''.join(e.generate_std_error_json()))\n else:\n data = str(e)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n start_response(\"%i %s\" % (code, sub_code), response_headers)\n return [data]",
"def report_transaction_error_messages(self):\n response = self.__get_transaction_response()\n\n # get response data from response object\n response_data = response.json()\n\n # get error messages\n response_error = response_data['Error']\n response_error_messages = response_error['messages']\n\n # add all error messages to the report\n error_messages_to_report = []\n for response_error_message in response_error_messages:\n error_description = response_error_message['description']\n error_messages_to_report.append(error_description)\n\n return error_messages_to_report",
"def parsed_error_msg(self):\r\n # Translates the category names and messages into something more human readable\r\n message_dict = {\r\n (\"photoIdReasons\", \"Not provided\"): _(\"No photo ID was provided.\"),\r\n (\"photoIdReasons\", \"Text not clear\"): _(\"We couldn't read your name from your photo ID image.\"),\r\n (\"generalReasons\", \"Name mismatch\"): _(\"The name associated with your account and the name on your ID do not match.\"),\r\n (\"userPhotoReasons\", \"Image not clear\"): _(\"The image of your face was not clear.\"),\r\n (\"userPhotoReasons\", \"Face out of view\"): _(\"Your face was not visible in your self-photo\"),\r\n }\r\n\r\n try:\r\n msg_json = json.loads(self.error_msg)\r\n msg_dict = msg_json[0]\r\n\r\n msg = []\r\n for category in msg_dict:\r\n # find the messages associated with this category\r\n category_msgs = msg_dict[category]\r\n for category_msg in category_msgs:\r\n msg.append(message_dict[(category, category_msg)])\r\n return u\", \".join(msg)\r\n except (ValueError, KeyError):\r\n # if we can't parse the message as JSON or the category doesn't\r\n # match one of our known categories, show a generic error\r\n log.error('PhotoVerification: Error parsing this error message: %s', self.error_msg)\r\n return _(\"There was an error verifying your ID photos.\")",
"def get_aggregated_exceptions(self) -> Payload:\n return Payload(aggregated_errors=list(self._aggregated_exceptions.values()))",
"def formatErrors(self):\n errorlist = []\n xepsWithErrors = sorted(\n set(self.getParseErrors() + self.getBuildErrors()),\n key=lambda x: str(x))\n if self.getErrors() or xepsWithErrors:\n if self.getErrors():\n errorlist.append(\"********** Read errors **********\")\n for error in self.getErrors():\n errorlist.append(error)\n for xep in xepsWithErrors:\n errorlist.append(\n \"********** Error report for {} **********\".format(str(xep)))\n if xep.parseErrors:\n errorlist.append(\"********** Parsing Errors **********\")\n errors = list(set(xep.parseErrors))\n for error in errors:\n errorlist.append(error)\n if xep.buildErrors:\n errorlist.append(\"********** Build Errors **********\")\n for error in xep.buildErrors:\n if len(error.splitlines()) > 4:\n error = ''.join(error.splitlines()[:4])\n errorlist.append(error)\n return '\\n'.join(errorlist)\n else:\n return None",
"def testExtendedErrorMessageWithTree(self):\n\n json_message = current.xml.json_message\n\n msg = json_message(False, 405, message=\"Test\", tree='{\"test\": \"value\"}')\n msg = json.loads(msg)\n self.assertEqual(len(msg), 4)\n self.assertEqual(msg[\"status\"], \"failed\")\n self.assertEqual(msg[\"statuscode\"], \"405\")\n self.assertEqual(msg[\"message\"], \"Test\")\n self.assertTrue(isinstance(msg[\"tree\"], dict))\n tree = msg[\"tree\"]\n self.assertEqual(len(tree), 1)\n self.assertEqual(tree[\"test\"], \"value\")",
"def extended(self) -> List:\n return List([String.build(self.maintype), String.build(self.subtype),\n _ParamsList(self.content_type_params),\n String.build(self.content_id),\n String.build(self.content_description),\n String.build(self.content_transfer_encoding,\n fallback=b'7BIT'),\n Number(self.size),\n self.envelope_structure,\n self.body_structure.extended,\n Number(self.lines),\n String.build(self.body_md5),\n String.build(self.content_disposition),\n String.build(self.content_language),\n String.build(self.content_location)])",
"def get_messages(self):\n other_user_email = request.args.get('other_user_email')\n page = request.args.get('page')\n per_page = request.args.get('per_page')\n if not other_user_email or not page or not per_page:\n self.logger.debug(messages.MISSING_FIELDS_ERROR % \"query params\")\n return messages.ERROR_JSON % messages.MISSING_FIELDS_ERROR % \"query params\", 400\n email_token = auth.current_user()[0]\n page = int(page)\n per_page = int(per_page)\n # App sends starting with 1 but we start at 0\n page -= 1\n try:\n message_list, pages = self.friend_database.get_conversation(email_token, other_user_email, per_page, page)\n except NoMoreMessagesError:\n self.logger.debug(messages.NO_MORE_PAGES_ERROR)\n return messages.NO_MORE_PAGES_ERROR, 404\n message_list = [{k:v for k,v in m._asdict().items() if k != \"hidden_to\"} for m in message_list]\n for i in range(len(message_list)):\n message_list[i][\"timestamp\"] = message_list[i][\"timestamp\"].isoformat()\n return json.dumps({\"messages\": message_list, \"pages\": pages}), 200",
"def get_response(self):\n return self.messages",
"def deserialize_known_exception(error):\n message = error['message']\n\n known_exception_type_kwargs = error['known_exception_type_kwargs']\n known_exception_type = getattr(exceptions, error['known_exception_type'])\n known_exception_type_args = error['known_exception_type_args']\n\n if error['append_message']:\n known_exception_type_args.append(message)\n else:\n known_exception_type_args.insert(0, message)\n return known_exception_type(\n *known_exception_type_args,\n **known_exception_type_kwargs\n )",
"def get_error(self) -> List[str]:\n return []",
"def get_error(self) -> List[str]:\n return []",
"def process_sub_serializer_errors(self, serializer_error_dict, error_type):\n sub_serializer_errors = serializer_error_dict.get('errors', [])\n sub_serializer_non_field_errors = serializer_error_dict.get('non_field_errors', None)\n result = []\n for sub_error in sub_serializer_errors:\n if sub_error['field'] is None:\n sub_error['field'] = error_type\n result.append(sub_error)\n if sub_serializer_non_field_errors is not None:\n result.extend(\n self.get_non_field_error_entries(sub_serializer_non_field_errors)\n )\n return result",
"def extract_messages(self,msg_list):\n msgs = []\n for m in msg_list:\n msgs.append(json.loads(str(m)))\n return msgs",
"def parsed_error_msg(self):\r\n return self.error_msg",
"def _process_message(self, response):\n message = str()\n try:\n message = response.json()\n except (simplejson.JSONDecodeError, ValueError) as e:\n message = response.text\n return message",
"def get_errors(response):\n errors = response.get(\"error\")\n if errors:\n return [e.get(\"message\") for e in errors]\n return None",
"def extended(self) -> List:\n parts = [part.extended for part in self.parts]\n return List([_Concatenated(parts), String.build(self.subtype),\n _ParamsList(self.content_type_params),\n String.build(self.content_disposition),\n String.build(self.content_language),\n String.build(self.content_location)])",
"def _get_errors(exc):\n if hasattr(exc, 'message'):\n errors = exc.messages\n else:\n errors = [str(exc)]\n return errors",
"def list(self, query_params=None, **kwargs):\n # type: (WebhookListQueryParams, dict) -> Webhook\n\n return self.api_client.get(\n '/notifications/webhooks/encoding/encodings/error',\n query_params=query_params,\n pagination_response=True,\n type=Webhook,\n **kwargs\n )",
"def _decode(self, message):\n raise NotImplementedError(\"_decode needs to be implemented in {} subclass\".format(type(self).__name__))",
"def __call__(self, environ, start_response):\n start_response(self.status, self.headers)\n return [self.message] if not isinstance(self.message, list) else self.message",
"def msgs_from_bytes(self, b):\n msgs = []\n # User remainder bytes\n parse_bytes = self.remainder + b.decode('ascii')\n # Find the first frame delimiter\n i = parse_bytes.find('\\r\\n')\n while i >= 0:\n # Try to parse a single message\n m = self._parse_msg(parse_bytes[:i])\n # Remove parsed bytes and delimter\n parse_bytes = parse_bytes[i+2:]\n # Add parsed message, if any\n if m:\n msgs.append(m)\n self.logger.debug('Parsed ASCII frame: address={}, function={}, len={}'.format(m.address, m.function, len(m.data) if m.data else 0))\n #else - warn?\n i = parse_bytes.find('\\r\\n')\n # Store any remaining bytes for the next pass\n self.remainder = parse_bytes\n return msgs",
"def _processGETErr(self, e, request):\r\n if e.check(InvalidRequest):\r\n msg = e.getErrorMessage()\r\n code = httplib.BAD_REQUEST\r\n elif e.check(UnauthorizedLogin):\r\n msg = e.getErrorMessage()\r\n code = httplib.UNAUTHORIZED\r\n elif e.check(InternalError):\r\n e.printTraceback()\r\n msg = 'Internal Error'\r\n code = httplib.INTERNAL_SERVER_ERROR\r\n else:\r\n e.printTraceback()\r\n msg = 'Fatal Error'\r\n code = httplib.INTERNAL_SERVER_ERROR\r\n\r\n self._render_GET(request, code, 'text/plain; charset=utf-8', msg)",
"def parse_last_exception(message):\n for pattern, response in patterns:\n items_found = re.findall(pattern, repr(message))\n if items_found:\n #print(\"FOUND\", items_found)\n print_exception_message(response, items_found[0])\n break\n else:\n unrecognised_exception(message)"
] | [
"0.6861036",
"0.57295793",
"0.5393745",
"0.52291447",
"0.51675904",
"0.50977695",
"0.50927067",
"0.50382864",
"0.499844",
"0.49484342",
"0.48670247",
"0.4862164",
"0.48498005",
"0.48152092",
"0.479743",
"0.4794128",
"0.4794128",
"0.4792617",
"0.47888657",
"0.4763331",
"0.4752166",
"0.4748517",
"0.4732601",
"0.4730067",
"0.47297788",
"0.47293502",
"0.47199076",
"0.47054747",
"0.47044468",
"0.4698819"
] | 0.8262206 | 0 |
Gets the list of decoded messages from the extended_error. | def _get_extended_error(self, extended_error):
return self._render_extended_error_message_list(extended_error) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _render_extended_error_message_list(self, extended_error):\n messages = []\n if isinstance(extended_error, dict):\n if ('Type' in extended_error and\n extended_error['Type'].startswith('ExtendedError.')):\n for msg in extended_error['Messages']:\n message_id = msg['MessageID']\n x = message_id.split('.')\n registry = x[0]\n msgkey = x[len(x) - 1]\n\n # if the correct message registry is loaded,\n # do string resolution\n if (registry in self.message_registries and msgkey in\n self.message_registries[registry]['Messages']):\n rmsgs = self.message_registries[registry]['Messages']\n msg_dict = rmsgs[msgkey]\n msg_str = message_id + ': ' + msg_dict['Message']\n\n for argn in range(0, msg_dict['NumberOfArgs']):\n subst = '%' + str(argn+1)\n m = str(msg['MessageArgs'][argn])\n msg_str = msg_str.replace(subst, m)\n\n if ('Resolution' in msg_dict and\n msg_dict['Resolution'] != 'None'):\n msg_str += ' ' + msg_dict['Resolution']\n\n messages.append(msg_str)\n else:\n # no message registry, simply return the msg object\n # in string form\n messages.append(str(message_id))\n\n return messages",
"def retrieve_error_messages(self):\n return self.errors_seen[:]",
"def _get_errors(exc):\n if hasattr(exc, 'message'):\n errors = exc.messages\n else:\n errors = [str(exc)]\n return errors",
"def get_error(self) -> List[str]:\n return []",
"def get_error(self) -> List[str]:\n return []",
"def get_messages(self):\r\n return self.messages",
"def get_messages(self):\n\t\tcontents = self.archive.read_file('replay.message.events')\n\t\treturn self.protocol.decode_replay_message_events(contents)",
"def get_errors(response):\n errors = response.get(\"error\")\n if errors:\n return [e.get(\"message\") for e in errors]\n return None",
"def messages(self):\n return list(iter(self))",
"def error(self) -> list:\n return self.__err",
"def get_encoding_errors(self):\n return self._encoding_errors",
"def _get_resp_body_errors(self):\n\n if self._resp_body_errors and len(self._resp_body_errors) > 0:\n return self._resp_body_errors\n\n errors = []\n warnings = []\n resp_codes = []\n\n if self.verb is None:\n return errors\n\n dom = self.response.dom()\n if dom is None:\n return errors\n\n for e in dom.findall('Errors'):\n eSeverity = None\n eClass = None\n eShortMsg = None\n eLongMsg = None\n eCode = None\n\n try:\n eSeverity = e.findall('SeverityCode')[0].text\n except IndexError:\n pass\n\n try:\n eClass = e.findall('ErrorClassification')[0].text\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n except IndexError:\n pass\n\n try:\n eShortMsg = smart_encode(e.findall('ShortMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eLongMsg = smart_encode(e.findall('LongMessage')[0].text)\n except IndexError:\n pass\n\n try:\n eCode = e.findall('ErrorCode')[0].text\n if int(eCode) not in resp_codes:\n resp_codes.append(int(eCode))\n except IndexError:\n pass\n\n msg = str(\"Class: {eClass}, Severity: {severity}, Code: {code}, {shortMsg} {longMsg}\") \\\n .format(eClass=eClass, severity=eSeverity, code=eCode, shortMsg=eShortMsg,\n longMsg=eLongMsg)\n\n # from IPython import embed; embed()\n\n if eSeverity == 'Warning':\n warnings.append(msg)\n else:\n errors.append(msg)\n\n self._resp_body_warnings = warnings\n self._resp_body_errors = errors\n self._resp_codes = resp_codes\n\n if self.config.get('warnings') and len(warnings) > 0:\n log.warn(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(warnings)))\n\n if self.response.reply.Ack == 'Failure':\n if self.config.get('errors'):\n log.error(\"{verb}: {message}\\n\\n\".format(\n verb=self.verb, message=\"\\n\".join(errors)))\n\n return errors\n\n return []",
"def messages(self):\n return self._messages",
"def messages(self):\n return self._messages",
"def messages(self):\n return self._messages",
"def get_response(self):\n return self.messages",
"def get_messages(self):\n data = self.socket.recv(BUF_SIZE).decode()\n return data.split('\\0')",
"def getMessages(self):\n raise NotImplementedError(\"Child class must implement this\")",
"def getErrorsList(self):\n return self.__errors",
"def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]",
"def msgs_from_bytes(self, b):\n msgs = []\n # User remainder bytes\n parse_bytes = self.remainder + b.decode('ascii')\n # Find the first frame delimiter\n i = parse_bytes.find('\\r\\n')\n while i >= 0:\n # Try to parse a single message\n m = self._parse_msg(parse_bytes[:i])\n # Remove parsed bytes and delimter\n parse_bytes = parse_bytes[i+2:]\n # Add parsed message, if any\n if m:\n msgs.append(m)\n self.logger.debug('Parsed ASCII frame: address={}, function={}, len={}'.format(m.address, m.function, len(m.data) if m.data else 0))\n #else - warn?\n i = parse_bytes.find('\\r\\n')\n # Store any remaining bytes for the next pass\n self.remainder = parse_bytes\n return msgs",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def getErrors(self) -> java.util.Collection:\n ...",
"def __msgtolist(self) -> List[str]:\n return self.msg.splitlines()",
"def get_received_messages(self):\n return self.received_messages",
"def validation_errors_to_error_messages(validation_errors):\n error_messages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n error_messages.append(f\"{field}: {error}\")\n return error_messages"
] | [
"0.780336",
"0.66833067",
"0.6297517",
"0.62860835",
"0.62860835",
"0.60984194",
"0.6095849",
"0.60497504",
"0.60270995",
"0.59741163",
"0.59682614",
"0.59034014",
"0.58919424",
"0.58919424",
"0.58919424",
"0.58700424",
"0.5852855",
"0.57940054",
"0.5786559",
"0.57755375",
"0.5728941",
"0.5728451",
"0.5728451",
"0.5728451",
"0.5728451",
"0.5728451",
"0.5719009",
"0.57163906",
"0.57159895",
"0.5709386"
] | 0.7126763 | 1 |
Gets the PCI devices. | def _get_pci_devices(self):
system = self._get_host_details()
if ('links' in system['Oem']['Hp'] and
'PCIDevices' in system['Oem']['Hp']['links']):
# Get the PCI URI and Settings
pci_uri = system['Oem']['Hp']['links']['PCIDevices']['href']
status, headers, pci_device_list = self._rest_get(pci_uri)
if status >= 300:
msg = self._get_extended_error(pci_device_list)
raise exception.IloError(msg)
return pci_device_list
else:
msg = ('links/PCIDevices section in ComputerSystem/Oem/Hp'
' does not exist')
raise exception.IloCommandNotSupportedError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_pci_device_list(self):\n pass",
"def _get_gpu_pci_devices(self):\n pci_device_list = self._get_pci_devices()\n\n gpu_list = []\n items = pci_device_list['Items']\n for item in items:\n if item['ClassCode'] in CLASSCODE_FOR_GPU_DEVICES:\n if item['SubclassCode'] in SUBCLASSCODE_FOR_GPU_DEVICES:\n gpu_list.append(item)\n return gpu_list",
"def get_devices(self):\n\n \"\"\"\n # Note: This code is no longer required with the latest spt updates.\n # But that said, leaving for now so I don't risk breaking folks!\n if not self._use_lsscsi:\n message = \"Find Number of IOM's\"\n command = \"lsscsi | fgrep enclo | egrep 'HGST|WDC' | wc -l\"\n pdata = self._run_command(command=command, message=message, logger=self._logger, shell=True)\n ioms = (int)(pdata['stdout'].strip())\n if ioms > 1:\n self._use_lsscsi = True\n if not self._use_lsscsi and os.path.exists('/etc/multipath.conf'):\n self._use_lsscsi = True\n \"\"\"\n # Allow above logic or options to override lsscsi vs. spt usage.\n if not self._use_lsscsi or self._force_spt:\n self.get_devices_spt()\n else:\n self.get_devices_lsscsi()\n return",
"def get_devices():\n devices = []\n for device_id in range(pm.lib.Pm_CountDevices()):\n devices.append(DeviceInfo(device_id))\n\n return devices",
"def get_devices(self):\n return self.api_request('GET', self.url + '/device', {})",
"def devices(self):\n return self.enumerate_devices()",
"def get_all_devices(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_GetAllDevices', self.handle))",
"def get_devices(self):\n devices = []\n for i in self.devices:\n devices.append(self.devices[i])\n\n return devices",
"def get_devices(self):\n devices = self.get(\"event/device\")",
"def GetAllDevices(self):\n\n return list(self.YieldAllDevices())",
"def get_devices(self):\n return get_devices(self.api_key)",
"def list_devices(cls):\n # get all matching devices\n return usb.core.find(\n find_all=True,\n custom_match=lambda dev: (\n dev.idVendor == cls.vendor_id and dev.idProduct in cls.product_ids\n ),\n )",
"def devices(self):\n\t\t\tdevices = []\n\t\t\tnum = cuda.Device.count()\n\t\t\tfor id in range(num):\n\t\t\t\tname = cuda.Device(id).name()\n\t\t\t\tmemory = cuda.Device(id).total_memory()\n\t\t\t\tdevices.append((memory, name, id))\n\t\t\treturn devices",
"def get_devices(self):\n e = ctypes.POINTER(rs_error)()\n n_devices = lrs.rs_get_device_count(self.ctx, ctypes.byref(e))\n _check_error(e)\n\n lrs.rs_get_device.restype = ctypes.POINTER(rs_device)\n for idx in range(n_devices):\n dev = lrs.rs_get_device(self.ctx, idx, ctypes.byref(e))\n _check_error(e)\n\n name = pp(lrs.rs_get_device_name, dev, ctypes.byref(e))\n _check_error(e)\n\n serial = pp(lrs.rs_get_device_serial, dev, ctypes.byref(e))\n _check_error(e)\n\n version = pp(lrs.rs_get_device_firmware_version, dev, ctypes.byref(e))\n _check_error(e)\n\n is_streaming = lrs.rs_is_device_streaming(dev, ctypes.byref(e))\n _check_error(e)\n\n yield {'id': idx, 'name': name, 'serial': serial,\n 'firmware': version, 'is_streaming': is_streaming}",
"def devices(self, **kwargs):\n return self._get(API.DEVICES.value, check_202=True, **kwargs)",
"def devices(self):\n return self._devices",
"def devices(self):\n return self._devices",
"def devices(self):\n return self._devices",
"def devices(self):\n return self._devices",
"def get_devices(self): \n devices = []\n \n # get all the keys from the dictionary\n keys = self.SCPI_Data.keys()\n \n # extract the device specifier\n dev_keys = [key.split(':')[0] for key in keys]\n \n # iterate through the devices\n for key in dev_keys:\n if (key not in devices) and (key != 'SUP'):\n # this is a unique device, add it to the list\n devices = devices + [key]\n # end if\n # end for\n \n devices = devices + ['SIM']\n \n # replace the GPS if present with its longer name\n devices = ['GPSRM' if device == 'GPS' else device \n for device in devices]\n return devices",
"def list_devices(self):\n return [x for x in self.devices.keys()]",
"def devices(self):\n\n return self.__devices",
"def getDevices():\n devices = create_string_buffer(BUF_SIZE)\n daqmx(\n dll.DAQmxGetSysDevNames,\n (\n devices,\n BUF_SIZE\n )\n )\n return parseStringList(devices.value)",
"def get_available_devices(self):\n available_devices = []\n try:\n out = self.get_output(\"devices\")\n except Exception as e:\n logger.error(e)\n else:\n for line in out:\n device = self.parse_device_info(line)\n if device:\n available_devices.append(device)\n return available_devices",
"def devices(self):\n return {k:v for k, v in self._data.items() \n if v[\"type\"] == \"DEVICE\"}",
"def findDevices(self):\n devs = []\n for name, (serServer, port) in self.serialLinks.items():\n if serServer not in self.client.servers:\n continue\n server = self.client[serServer]\n ports = yield server.list_serial_ports()\n if port not in ports:\n continue\n devName = '%s - %s' % (serServer, port)\n devs += [(devName, (server, port))]\n returnValue(devs)",
"def list_devices():\r\n DeviceManagerCLI.BuildDeviceList()\r\n return DeviceManagerCLI.GetDeviceList()",
"def findDevices(self):\n devs = []\n for name in self.serialLinks:\n port = self.serialLinks[name]\n if name not in self.client.servers:\n continue\n server = self.client[name]\n ports = yield server.list_serial_ports()\n print ports\n if port not in ports:\n continue\n devName = '%s - %s' % (name, port)\n devs += [(devName, (server, port))]\n returnValue(devs)",
"def get_devices():\n devices = []\n for path in hookenv.action_get('osd-devices').split(' '):\n path = path.strip()\n if not os.path.isabs(path):\n raise Error('{}: Not absolute path.'.format(path))\n devices.append(path)\n return devices",
"def devices(self):\n return list(self._device_types)"
] | [
"0.74077874",
"0.7218372",
"0.72180504",
"0.72177297",
"0.7194209",
"0.7167658",
"0.70619637",
"0.69890875",
"0.698407",
"0.69460297",
"0.6933677",
"0.6915167",
"0.688954",
"0.68429095",
"0.6825503",
"0.68069667",
"0.68069667",
"0.68069667",
"0.68069667",
"0.67960405",
"0.6794884",
"0.6740752",
"0.6725867",
"0.6676614",
"0.66740334",
"0.6669641",
"0.65504587",
"0.65294904",
"0.652689",
"0.65047586"
] | 0.8361106 | 0 |
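A minimal consumer sketch for the document above, assuming `client` is an instance of the iLO wrapper class; the 'Items', 'ClassCode' and 'SubclassCode' keys follow the payload shape used by the GPU-filtering negative in this row.

    pci = client._get_pci_devices()
    for item in pci.get('Items', []):
        # Each entry describes one PCI function on the managed server.
        print(item.get('ClassCode'), item.get('SubclassCode'))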
Get the BIOS settings resource. | def _get_bios_settings_resource(self, data):
try:
bios_settings_uri = data['links']['Settings']['href']
except KeyError:
msg = ('BIOS Settings resource not found.')
raise exception.IloError(msg)
status, headers, bios_settings = self._rest_get(bios_settings_uri)
if status != 200:
msg = self._get_extended_error(bios_settings)
raise exception.IloError(msg)
return headers, bios_settings_uri, bios_settings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_bios_setting(self, bios_property):\n headers, bios_uri, bios_settings = self._check_bios_resource([\n bios_property])\n return bios_settings[bios_property]",
"def get_current_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n current_settings = sushy_system.bios.json\n except sushy.exceptions.SushyError as e:\n msg = (self._('The current BIOS Settings were not found. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n attributes = current_settings.get(\"Attributes\")\n return attributes",
"def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })",
"def _get_bios_boot_resource(self, data):\n try:\n boot_uri = data['links']['Boot']['href']\n except KeyError:\n msg = ('Boot resource not found.')\n raise exception.IloCommandNotSupportedError(msg)\n\n status, headers, boot_settings = self._rest_get(boot_uri)\n\n if status != 200:\n msg = self._get_extended_error(boot_settings)\n raise exception.IloError(msg)\n\n return boot_settings",
"def get_pending_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n settings = sushy_system.bios.pending_attributes\n except sushy.exceptions.SushyError as e:\n msg = (self._('The pending BIOS Settings were not found. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n return settings",
"def settings(self):\r\n return SettingResource(self)",
"def settings():\n return _get_settings()[1]",
"def get_settings(self):\n url = \"https://api.imgur.com/3/account/{0}/settings\".format(self.name)\n return self._imgur._send_request(url)",
"def get_bios_settings(bmc):\n bios_settings = bmc.list_bios_settings()\n # Convert the settings to something that is JSON-serialisable.\n settings = {}\n for param, value in bios_settings.items():\n setting = {}\n # Not all attributes exist on all settings, so allow them to be absent.\n attrs = {\n 'current_value',\n 'pending_value',\n 'possible_values',\n }\n for attr in attrs:\n if hasattr(value, attr):\n setting[attr] = getattr(value, attr)\n settings[param] = setting\n return settings",
"def get(isamAppliance, check_mode=False, force=False, ignore_error=False):\n return isamAppliance.invoke_get(\"Retrieving a list of firmware settings\",\n \"/firmware_settings\", ignore_error=ignore_error, requires_model=requires_model)",
"def settings(self):\r\n url = '{0}/userSettings'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json",
"def advanced_settings(self):\n settings = ADVANCEDSETTINGS()\n ckresult(_dll.FMOD_System_GetAdvancedSettings(self._ptr, byref(settings)))\n return settings",
"def get_settings(self):\n return self.settings",
"def _load_settings(self):\n self._dll.LS_LoadSettings(self._serial_number)\n return None",
"def get_settings():\n with open('config/config.json') as data_file:\n settings = json.load(data_file)\n return settings",
"def get_settings_resource(res_type, abbr, res_name):\n\t\n\tif zen_settings.has_key(res_type):\n\t\tresource = zen_settings[res_type];\n\t\tif (has_deep_key(resource, [res_name, abbr])):\n\t\t\treturn resource[res_name][abbr]\n\t\telif 'extends' in resource:\n\t#\t\tfind abbreviation in ancestors\n\t\t\tfor v in resource['extends']:\n\t\t\t\tif has_deep_key(zen_settings, [v, res_name, abbr]):\n\t\t\t\t\treturn zen_settings[v][res_name][abbr]\n\treturn None;",
"def get_settings(self):\n settings = self.client._perform_json(\n \"GET\", \"/projects/%s/apiservices/%s/settings\" % (self.project_key, self.service_id))\n\n return DSSAPIServiceSettings(self.client, self.project_key, self.service_id, settings)",
"def get_settings():\n return db.get_data()",
"def get_settings():\n df = Struct(\n template=DYNAMICFORMS_BOOTSTRAP,\n )\n df = df.clone(**getattr(s, \"DYNAMICFORMS\", {}))\n template = df.template\n if template == DYNAMICFORMS_BOOTSTRAP:\n return SettingsBootstrap(**df.__to_dict__())\n return SettingsJqueryUI(**df.__to_dict__())",
"def device_setting(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"device_setting\"), kwargs)",
"def getSerialPortSettings(cls):\n return cls.serial_settings",
"def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings",
"def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings",
"def get_settings(self):\n\n\t\t# TODO: Consider YAML. Human writable, machine readable.\n\t\twith open(self.filename) as fp:\n\t\t\ttry:\n\t\t\t\treturn json.load(fp)\n\t\t\texcept Exception, e:\n\t\t\t\tif self.DEBUG:\n\t\t\t\t\tprint >>sys.stderr, 'get_settings exception:', e\n\t\t\t\treturn {}",
"def get_settings():\n return SettingCollection.build()",
"def get(cls, client, name=\"\", option_=\"\") :\n\t\ttry :\n\t\t\tif not name :\n\t\t\t\tobj = appfwlearningsettings()\n\t\t\t\tresponse = obj.get_resources(client, option_)\n\t\t\telse :\n\t\t\t\tif type(name) is not list :\n\t\t\t\t\tif type(name) == cls :\n\t\t\t\t\t\traise Exception('Invalid parameter name:{0}'.format(type(name)))\n\t\t\t\t\tobj = appfwlearningsettings()\n\t\t\t\t\tobj.profilename = name\n\t\t\t\t\tresponse = obj.get_resource(client, option_)\n\t\t\t\telse :\n\t\t\t\t\tif name and len(name) > 0 :\n\t\t\t\t\t\tif type(name[0]) == cls :\n\t\t\t\t\t\t\traise Exception('Invalid parameter name:{0}'.format(type(name[0])))\n\t\t\t\t\t\tresponse = [appfwlearningsettings() for _ in range(len(name))]\n\t\t\t\t\t\tobj = [appfwlearningsettings() for _ in range(len(name))]\n\t\t\t\t\t\tfor i in range(len(name)) :\n\t\t\t\t\t\t\tobj[i] = appfwlearningsettings()\n\t\t\t\t\t\t\tobj[i].profilename = name[i]\n\t\t\t\t\t\t\tresponse[i] = obj[i].get_resource(client, option_)\n\t\t\treturn response\n\t\texcept Exception as e :\n\t\t\traise e",
"def find_settings():\n return Setting()",
"def GetFileCleanerSettings():\n obj = ndb.Key(FileCleanerSettings, FILE_CLEANER_SETTINGS_ID).get()\n return obj or DEFAULT_FILE_CLEANER_SETTINGS",
"def _get_bios_mappings_resource(self, data):\n try:\n map_uri = data['links']['Mappings']['href']\n except KeyError:\n msg = ('Mappings resource not found.')\n raise exception.IloCommandNotSupportedError(msg)\n\n status, headers, map_settings = self._rest_get(map_uri)\n if status != 200:\n msg = self._get_extended_error(map_settings)\n raise exception.IloError(msg)\n\n return map_settings",
"def getCurrentSetting(self):\n return {}"
] | [
"0.6687052",
"0.66053444",
"0.6305347",
"0.62781394",
"0.6151722",
"0.6145869",
"0.5832804",
"0.56546223",
"0.5652844",
"0.5579022",
"0.5577148",
"0.55607617",
"0.5531982",
"0.5527874",
"0.54689676",
"0.5428091",
"0.5396962",
"0.53963387",
"0.53295165",
"0.53211266",
"0.52970964",
"0.52945626",
"0.52945626",
"0.527815",
"0.5273763",
"0.5271563",
"0.5270882",
"0.5264808",
"0.52625394",
"0.5255814"
] | 0.7173748 | 0 |
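The document above chases a 'Settings' href out of the BIOS body; a standalone sketch of the same pattern with `requests` (the base URL, auth tuple, and `verify=False` are deployment assumptions, not part of the iLO code).

    import requests

    def get_settings_resource(base_url, bios_body, auth):
        # Follow the 'Settings' href advertised by the BIOS resource,
        # mirroring _get_bios_settings_resource.
        href = bios_body['links']['Settings']['href']
        resp = requests.get(base_url + href, auth=auth, verify=False)
        if resp.status_code != 200:
            raise RuntimeError('GET %s failed: %d' % (href, resp.status_code))
        return resp.headers, href, resp.json()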
Check if the PATCH Operation is allowed on the resource. | def _validate_if_patch_supported(self, headers, uri):
if not self._operation_allowed(headers, 'PATCH'):
msg = ('PATCH Operation not supported on the resource '
'"%s"' % uri)
raise exception.IloError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_PATCH(self):\n if not self.url:\n return\n response = self.client.patch(self.url, {}, format='json')\n self.assertIn(response.status_code, [status.HTTP_405_METHOD_NOT_ALLOWED,\n status.HTTP_401_UNAUTHORIZED])",
"def test_partial_update_should_not_be_allowed(self):\n response = self.client.patch(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_client_can_do_patch_request(self):\n response = self.httpbin_4.test_requests_patch_method()\n self.assertEqual(response.request.method, 'PATCH')\n self.assertEqual(response.status_code, 200)",
"def test_update_should_not_be_allowed(self):\n response = self.client.put(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def _check_iscsi_rest_patch_allowed(self):\n\n headers, bios_uri, bios_settings = self._check_bios_resource()\n # Check if the bios resource exists.\n\n if('links' in bios_settings and 'iScsi' in bios_settings['links']):\n iscsi_uri = bios_settings['links']['iScsi']['href']\n status, headers, settings = self._rest_get(iscsi_uri)\n\n if status != 200:\n msg = self._get_extended_error(settings)\n raise exception.IloError(msg)\n\n if not self._operation_allowed(headers, 'PATCH'):\n headers, iscsi_uri, settings = (\n self._get_iscsi_settings_resource(settings))\n self._validate_if_patch_supported(headers, iscsi_uri)\n\n return iscsi_uri\n\n else:\n msg = ('\"links/iScsi\" section in bios'\n ' does not exist')\n raise exception.IloCommandNotSupportedError(msg)",
"def _check_patch_requirements(region_name,\n applied_patches=None,\n available_patches=None):\n\n api_token = None\n if applied_patches:\n patches_applied = patch_api.patch_is_applied(\n token=api_token,\n timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,\n region_name=region_name,\n patches=applied_patches\n )\n if not patches_applied:\n raise wsme.exc.ClientSideError(_(\n \"The following patches must be applied before doing \"\n \"the kubernetes upgrade: %s\" % applied_patches))\n\n if available_patches:\n patches_available = patch_api.patch_is_available(\n token=api_token,\n timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,\n region_name=region_name,\n patches=available_patches\n )\n if not patches_available:\n raise wsme.exc.ClientSideError(_(\n \"The following patches must be available before doing \"\n \"the kubernetes upgrade: %s\" %\n available_patches))",
"def can_be_modified(self):\n return self.state in {RequestState.pending, RequestState.accepted}",
"def has_update_permissions(self, obj):\n return True",
"def handle_patch(self, api, command):\n return self._make_request_from_command('PATCH', command)",
"def can_update_comments(self):\n # Implemented from template for\n # osid.resource.ResourceAdminSession.can_update_resources\n # NOTE: It is expected that real authentication hints will be\n # handled in a service adapter above the pay grade of this impl.\n return True",
"def test_unsupported_requests_fail(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 405)\n\n response = self.client.put(self.url)\n self.assertEqual(response.status_code, 405)\n\n response = self.client.patch(self.url)\n self.assertEqual(response.status_code, 405)",
"def test_patch_not_allowed(self, parse_args):\n parse_args.side_effect = [{\n _ATTEMPT.attempt_id: 'forbidden'\n }, {\n _ATTEMPT.run_id: 'forbidden'\n }]\n _, err = self.resource.patch(self.attempts[1][_ATTEMPT.attempt_id])\n self.assertEqual(403, err)",
"def _operation_allowed(self, headers_dict, operation):\n\n if 'allow' in headers_dict:\n if operation in headers_dict['allow']:\n return True\n return False",
"def is_catastrophic(self):\n if (self.request.method.upper() == 'PUT'\n and 'PLURAL_PUT' not in self.http_methods) \\\n or (self.request.method.upper() == 'DELETE'\n and 'PLURAL_DELETE' not in self.http_methods):\n return True\n return False",
"def test_method_not_allowed(self):\n resp = self.app.put(\n \"/customers\", \n json={\"not\": \"today\"}, \n content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)",
"def test_review_list_other_method_not_allowed(self):\n client = Client()\n client.login(username='TEST_USER_1',\n email='TEST_EMAIL_1', password='TEST_PW_1')\n response = client.put('/api/review/')\n\n self.assertEqual(response.status_code, 405)",
"def partial_update(self, request, pk=None):\n return Response({'http_method':'PATCH'})",
"def patch(resource, data, **kwargs):\n\tresp = requests.patch(\n\t\t_endpoint(resource, 'PATCH'),\n\t\tparams=_jsonify_dict_values(kwargs),\n\t\tdata=json.dumps(data),\n\t\theaders=PAYLOAD_HEADERS,\n\t\tverify=SERVER_CERT\n\t)\n\tresp.raise_for_status()\n\treturn resp.json()",
"def test_validate_patch(client):\n response = client.patch(\n '/user/1',\n data=json.dumps({\n 'name': 'Jeff Knupp',\n }),\n headers={'Content-Type': 'application/json'}\n )\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE",
"def assertHttpMethodNotAllowed(self, resp):\r\n return self.assertEqual(resp.status_code, 405)",
"def partial_update(self, request, pk=None):\n\n return Response({'http_method': 'PATCH'})",
"def is_patched(self) -> bool:\n client = Client()\n # Get the relevant service from the cluster\n service = client.get(Service, name=self.service_name, namespace=self._namespace)\n # Construct a list of expected ports, should the patch be applied\n expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]\n # Construct a list in the same manner, using the fetched service\n fetched_ports = [(p.port, p.targetPort) for p in service.spec.ports] # type: ignore[attr-defined] # noqa: E501\n return expected_ports == fetched_ports",
"def test_unsupported_request_methods(self):\n unsupported_methods = [\"POST\", \"PUT\", \"PATCH\", \"DELETE\"]\n for method_name in unsupported_methods:\n with self.subTest(method_name=method_name):\n request_method = getattr(self.client, method_name.lower())\n response = request_method(self.url)\n self.assertEqual(response.status_code, 405)",
"def partial_update(self, request, pk=None):\n\n return Response({'http_method':'PATCH'})",
"def patch(self , request , pk = None ):\r\n return Response({'method':'patch'})",
"def test_patch_request_by_non_owner(self):\n client = APIClient()\n client.credentials(HTTP_AUTHORIZATION=self.test_user2_token)\n response = client.post('/api/places/', self.restaurant_data, format='json')\n url = f\"/api/places/{response.data['id']}/\"\n\n client.credentials(HTTP_AUTHORIZATION=self.test_user1_token)\n response = client.patch(url, self.restaurant_data, format='json')\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)",
"def patch(self, resource, data, target=None, verb='patch', **kwargs):\n return self._modify_resource(resource, data, target, verb, **kwargs)",
"def is_update(self):\n return self.action in [\"update\", \"partial_update\"]",
"def check_method_allowed(cls, request):\r\n if not request.method in cls._meta.allowed_methods:\r\n raise HttpError(\r\n 'Method \\'%s\\' not allowed on this resource.' % request.method,\r\n status=status.HTTP_405_METHOD_NOT_ALLOWED)",
"def has_change_permissions_permission(self, request):\n return self.has_generic_permission(request, \"change_permissions\")"
] | [
"0.69640553",
"0.6944961",
"0.6830189",
"0.6657633",
"0.65127224",
"0.63872164",
"0.6329733",
"0.62977004",
"0.61410993",
"0.6072097",
"0.5949872",
"0.5900068",
"0.58420664",
"0.58221006",
"0.5762764",
"0.57505614",
"0.5717169",
"0.571581",
"0.56980234",
"0.5684516",
"0.56841666",
"0.56760865",
"0.5669029",
"0.5661743",
"0.56611824",
"0.5661169",
"0.5660552",
"0.5647885",
"0.56432146",
"0.56142247"
] | 0.803524 | 0 |
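For reference, the `_operation_allowed` helper the document relies on (visible among the negatives) reduces to an Allow-header substring check; a standalone sketch:

    def operation_allowed(headers, verb='PATCH'):
        # iLO advertises the permitted verbs in the Allow header of a
        # GET response; header-key casing varies, so check both.
        allow = headers.get('allow') or headers.get('Allow') or ''
        return verb in allow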
Retrieves BIOS settings of the server. | def _get_bios_setting(self, bios_property):
headers, bios_uri, bios_settings = self._check_bios_resource([
bios_property])
return bios_settings[bios_property] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_bios_settings_resource(self, data):\n try:\n bios_settings_uri = data['links']['Settings']['href']\n except KeyError:\n msg = ('BIOS Settings resource not found.')\n raise exception.IloError(msg)\n\n status, headers, bios_settings = self._rest_get(bios_settings_uri)\n if status != 200:\n msg = self._get_extended_error(bios_settings)\n raise exception.IloError(msg)\n\n return headers, bios_settings_uri, bios_settings",
"def get_current_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n current_settings = sushy_system.bios.json\n except sushy.exceptions.SushyError as e:\n msg = (self._('The current BIOS Settings were not found. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n attributes = current_settings.get(\"Attributes\")\n return attributes",
"def get_bios_settings(bmc):\n bios_settings = bmc.list_bios_settings()\n # Convert the settings to something that is JSON-serialisable.\n settings = {}\n for param, value in bios_settings.items():\n setting = {}\n # Not all attributes exist on all settings, so allow them to be absent.\n attrs = {\n 'current_value',\n 'pending_value',\n 'possible_values',\n }\n for attr in attrs:\n if hasattr(value, attr):\n setting[attr] = getattr(value, attr)\n settings[param] = setting\n return settings",
"def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })",
"def _get_bios_boot_resource(self, data):\n try:\n boot_uri = data['links']['Boot']['href']\n except KeyError:\n msg = ('Boot resource not found.')\n raise exception.IloCommandNotSupportedError(msg)\n\n status, headers, boot_settings = self._rest_get(boot_uri)\n\n if status != 200:\n msg = self._get_extended_error(boot_settings)\n raise exception.IloError(msg)\n\n return boot_settings",
"def get_pending_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n settings = sushy_system.bios.pending_attributes\n except sushy.exceptions.SushyError as e:\n msg = (self._('The pending BIOS Settings were not found. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n return settings",
"def fusion_api_get_server_hardware_bios(self, uri, api=None, headers=None):\n return self.sh.get(uri=uri, api=api, headers=headers, param='/bios')",
"def get_settings(self):\n url = \"https://api.imgur.com/3/account/{0}/settings\".format(self.name)\n return self._imgur._send_request(url)",
"def get_common_settings(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Settings/\"))",
"def get_nic_settings(bmc):\n nic_settings = bmc.list_nics()\n return nic_settings",
"def settings(self):\r\n url = '{0}/userSettings'.format(self.get_url())\r\n return http.Request('GET', url), parsers.parse_json",
"async def test_get_settings(spawn_client):\n client = await spawn_client(authorize=True)\n\n resp = await client.get(\"/account/settings\")\n\n assert resp.status == 200\n\n assert await resp.json() == {\n \"skip_quick_analyze_dialog\": True,\n \"show_ids\": True,\n \"show_versions\": True,\n \"quick_analyze_workflow\": \"pathoscope_bowtie\",\n }",
"def set_bios_bootmode_uefi(ip, login_account, login_password, system_id):\n result = {}\n login_host = \"https://\" + ip\n try:\n # Connect using the BMC address, account name, and password\n # Create a REDFISH object\n REDFISH_OBJ = redfish.redfish_client(base_url=login_host, username=login_account, timeout=utils.g_timeout,\n password=login_password, default_prefix='/redfish/v1', cafile=utils.g_CAFILE)\n # Login into the server and create a session\n REDFISH_OBJ.login(auth=utils.g_AUTH)\n except:\n traceback.print_exc()\n result = {'ret': False, 'msg': \"Please check the username, password, IP is correct\"}\n return result\n\n # GET the ComputerSystem resource\n system = utils.get_system_url(\"/redfish/v1\", system_id, REDFISH_OBJ)\n if not system:\n result = {'ret': False, 'msg': \"This system id is not exist or system member is None\"}\n REDFISH_OBJ.logout()\n return result\n for i in range(len(system)):\n system_url = system[i]\n response_system_url = REDFISH_OBJ.get(system_url, None)\n if response_system_url.status != 200:\n error_message = utils.get_extended_error(response_system_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (system_url, response_system_url.status, error_message)}\n REDFISH_OBJ.logout()\n return result\n else:\n # Get the bios resource\n bios_url = response_system_url.dict['Bios']['@odata.id']\n response_bios_url = REDFISH_OBJ.get(bios_url, None)\n if response_bios_url.status != 200:\n error_message = utils.get_extended_error(response_bios_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (bios_url, response_bios_url.status, error_message)}\n REDFISH_OBJ.logout()\n return result\n else: # Get bios success\n # Seek boot mode from bios attributes\n attribute_bootmode = None\n attributes = response_bios_url.dict['Attributes']\n for attribute in attributes:\n if attribute == \"BootMode\" or attribute == \"SystemBootMode\":\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n for attribute in attributes:\n if \"SystemBootMode\" in attribute:\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n for attribute in attributes:\n if \"Boot\" in attribute and \"Mode\" in attribute:\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n result = {'ret': False, 'msg': \"Can not found BootMode attribute in response of url %s\" %(bios_url)}\n REDFISH_OBJ.logout()\n return result\n\n # Get boot mode setting guide from bios registry\n WarningText = None\n ValueName = None\n bios_registry_url = \"/redfish/v1/Registries/\" + response_bios_url.dict['AttributeRegistry']\n response_bios_registry_url = REDFISH_OBJ.get(bios_registry_url, None)\n if response_bios_registry_url.status == 200:\n locations = response_bios_registry_url.dict['Location']\n bios_regjson_url = None\n for location in locations:\n if 'en' in location['Language']:\n bios_regjson_url = location['Uri']\n break\n if bios_regjson_url:\n response_bios_regjson_url = REDFISH_OBJ.get(bios_regjson_url, None)\n if response_bios_regjson_url.status == 200:\n regattributes = response_bios_regjson_url.dict['RegistryEntries']['Attributes']\n for regattribute in regattributes:\n if regattribute['AttributeName'] == attribute_bootmode:\n if 'WarningText' in regattribute:\n WarningText = regattribute['WarningText']\n for value in regattribute['Value']:\n if 'legacy' in value['ValueName'].lower():\n continue\n if 'uefi' in value['ValueName'].lower():\n ValueName = 
value['ValueName']\n break\n ValueName = value['ValueName']\n break\n \n # Perform patch to set\n if ValueName == None:\n ValueName = \"UEFIMode\"\n pending_url = response_bios_url.dict['@Redfish.Settings']['SettingsObject']['@odata.id']\n parameter = {attribute_bootmode: ValueName}\n attribute = {\"Attributes\": parameter}\n headers = {\"If-Match\": '*'}\n response_pending_url = REDFISH_OBJ.patch(pending_url, body=attribute, headers=headers)\n if response_pending_url.status in [200,204]:\n if WarningText:\n result = {'ret': True, 'msg': 'set bios bootmode uefi successful. WarningText: %s'% (WarningText) }\n else:\n result = {'ret': True, 'msg': 'set bios bootmode uefi successful'}\n elif response_pending_url.status == 405:\n result = {'ret': False, 'msg': \"Resource not supported\"}\n else:\n error_message = utils.get_extended_error(response_pending_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (\n pending_url, response_pending_url.status, error_message)}\n\n # Logout of the current session\n try:\n REDFISH_OBJ.logout()\n except:\n pass\n return result",
"def get():\n\n mba_ctrl_info = caps.mba_ctrl_info()\n\n res = {\n 'supported': mba_ctrl_info['supported'],\n 'enabled': mba_ctrl_info['enabled']\n }\n return res, 200",
"def _get_bios_mappings_resource(self, data):\n try:\n map_uri = data['links']['Mappings']['href']\n except KeyError:\n msg = ('Mappings resource not found.')\n raise exception.IloCommandNotSupportedError(msg)\n\n status, headers, map_settings = self._rest_get(map_uri)\n if status != 200:\n msg = self._get_extended_error(map_settings)\n raise exception.IloError(msg)\n\n return map_settings",
"async def get_system_info(self) -> Dict[str, Any]:\n assert self._client is not None\n return await self._client.invoke_method(\"system.info\")",
"def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings",
"def get_settings():\n settings = {}\n for setting in cfg.displayable_setting:\n settings[setting] = getattr(cfg, setting)\n return settings",
"def ex_get_hypervisor_sysinfo(self):\n xml = self.connection.getSysinfo()\n etree = ET.XML(xml)\n\n attributes = [\"bios\", \"system\", \"processor\", \"memory_device\"]\n\n sysinfo = {}\n for attribute in attributes:\n element = etree.find(attribute)\n entries = self._get_entries(element=element)\n sysinfo[attribute] = entries\n\n return sysinfo",
"def _check_bios_resource(self, properties=[]):\n\n system = self._get_host_details()\n if ('links' in system['Oem']['Hp'] and\n 'BIOS' in system['Oem']['Hp']['links']):\n # Get the BIOS URI and Settings\n bios_uri = system['Oem']['Hp']['links']['BIOS']['href']\n status, headers, bios_settings = self._rest_get(bios_uri)\n\n if status >= 300:\n msg = self._get_extended_error(bios_settings)\n raise exception.IloError(msg)\n\n # If property is not None, check if the bios_property is supported\n for property in properties:\n if property not in bios_settings:\n # not supported on this platform\n msg = ('BIOS Property \"' + property + '\" is not'\n ' supported on this system.')\n raise exception.IloCommandNotSupportedError(msg)\n\n return headers, bios_uri, bios_settings\n\n else:\n msg = ('\"links/BIOS\" section in ComputerSystem/Oem/Hp'\n ' does not exist')\n raise exception.IloCommandNotSupportedError(msg)",
"def get_srv_config(name):\n cmd = \"ceph --admin-daemon %s/%s.asok config show\" % \\\n (CEPH_SOCKET_PATH, name)\n out = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, \\\n stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n return json.loads(out.stdout.read())",
"def cmd_account_settings(client, args):\n account_settings = client.get_account_settings(args.username)\n data = account_settings.__dict__\n generate_output({'account_settings': data})",
"def get_settings():\n return db.get_data()",
"def retrieveGuildsInfo(self):\n serverInfo = self.con.getGuildsInfo()\n\n for server in serverInfo:\n serverData = server.split(', ')\n self.serverSettings[serverData[0]] = serverData[1]",
"def get_config(self, retrieve=\"all\", full=False, sanitized=False):\n\n command = \"/export verbose\" if full else \"/export\"\n\n running_config = self._send_command(command)\n running_config = re.sub(r'^#.*$', \"\", running_config, flags=re.M)\n\n return {\n \"startup\": \"\",\n \"running\": running_config.strip(),\n \"candidate\": \"\"\n }",
"async def economyset_showsettings(self, ctx: commands.Context):\r\n guild = ctx.guild\r\n if await bank.is_global():\r\n conf = self.config\r\n else:\r\n conf = self.config.guild(guild)\r\n await ctx.send(\r\n box(\r\n _(\r\n \"----Economy Settings---\\n\"\r\n \"Minimum slot bid: {slot_min}\\n\"\r\n \"Maximum slot bid: {slot_max}\\n\"\r\n \"Slot cooldown: {slot_time}\\n\"\r\n \"Payday amount: {payday_amount}\\n\"\r\n \"Payday cooldown: {payday_time}\\n\"\r\n \"Amount given at account registration: {register_amount}\\n\"\r\n \"Maximum allowed balance: {maximum_bal}\"\r\n ).format(\r\n slot_min=humanize_number(await conf.SLOT_MIN()),\r\n slot_max=humanize_number(await conf.SLOT_MAX()),\r\n slot_time=humanize_number(await conf.SLOT_TIME()),\r\n payday_time=humanize_number(await conf.PAYDAY_TIME()),\r\n payday_amount=humanize_number(await conf.PAYDAY_CREDITS()),\r\n register_amount=humanize_number(await bank.get_default_balance(guild)),\r\n maximum_bal=humanize_number(await bank.get_max_balance(guild)),\r\n )\r\n )\r\n )",
"def __get_base_info_api(self):\r\n try:\r\n return Call_shelly_api(url=self.__api_address + \"/settings\")\r\n except ShellyException as err:\r\n _LOGGER.warning(err)",
"def get_config(site='self'):\n path='/sites/%s/configuration' % (site)\n return _api_request('GET', path)",
"def GetAWSSettings(self):\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/cloud-connect-aws/GetAWSSettings\n FULL_URL = self.base_url+'/cloud-connect-aws/combined/settings/v1'\n HEADERS = self.headers\n result = self.Result()\n try:\n response = requests.request(\"GET\", FULL_URL, headers=HEADERS, verify=False)\n returned = result(response.status_code, response.headers, response.json())\n except Exception as e:\n returned = result(500, {}, str(e))\n \n return returned",
"def get():\n\n mba_info = caps.mba_info()\n\n res = {\n 'clos_num': mba_info['clos_num'],\n 'mba_enabled': mba_info['enabled'],\n 'mba_bw_enabled': mba_info['ctrl_enabled']\n }\n return res, 200"
] | [
"0.70597833",
"0.67072386",
"0.66295433",
"0.63893646",
"0.6140606",
"0.59224844",
"0.58876735",
"0.57552373",
"0.57381696",
"0.5689282",
"0.5650241",
"0.55698514",
"0.5555418",
"0.55383116",
"0.54772294",
"0.5444395",
"0.5402679",
"0.5402679",
"0.53762496",
"0.5365478",
"0.5358037",
"0.535643",
"0.5350886",
"0.53149605",
"0.5313657",
"0.5291689",
"0.527867",
"0.5252195",
"0.5247421",
"0.52301615"
] | 0.6843512 | 1 |
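`_get_bios_setting` delegates existence checks to `_check_bios_resource`; a standalone reader has to guard the lookup itself. A sketch with illustrative names:

    def read_bios_property(bios_settings, prop):
        # bios_settings is the parsed BIOS body returned by the REST GET.
        try:
            return bios_settings[prop]
        except KeyError:
            raise KeyError('BIOS property "%s" is not supported '
                           'on this system.' % prop)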
Get the iSCSI settings resource. | def _get_iscsi_settings_resource(self, data):
try:
iscsi_settings_uri = data['links']['Settings']['href']
except KeyError:
msg = ('iscsi settings resource not found.')
raise exception.IloCommandNotSupportedError(msg)
status, headers, iscsi_settings = self._rest_get(iscsi_settings_uri)
if status != 200:
msg = self._get_extended_error(iscsi_settings)
raise exception.IloError(msg)
return headers, iscsi_settings_uri, iscsi_settings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })",
"def get_settings(self):\n return self.settings",
"def get_settings(self):\n url = \"https://api.imgur.com/3/account/{0}/settings\".format(self.name)\n return self._imgur._send_request(url)",
"def settings():\n return _get_settings()[1]",
"def get_resource_config(target=False, force=None):\n return get_stored_property(ctx, 'resource_config', target, force)",
"def get_common_settings(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Settings/\"))",
"def get_settings(self):\n return AlgoliaUtils_request(self.client.headers, self.read_hosts, \"GET\", \"/1/indexes/%s/settings\" % self.url_index_name, self.client.timeout)",
"def _get_conf(self):\n self.press_conf = self.sysconf['PressureRegulators']\n return self.press_conf['PressureRegulator%d' % self.id_]",
"def _get_bios_settings_resource(self, data):\n try:\n bios_settings_uri = data['links']['Settings']['href']\n except KeyError:\n msg = ('BIOS Settings resource not found.')\n raise exception.IloError(msg)\n\n status, headers, bios_settings = self._rest_get(bios_settings_uri)\n if status != 200:\n msg = self._get_extended_error(bios_settings)\n raise exception.IloError(msg)\n\n return headers, bios_settings_uri, bios_settings",
"def get_settings():\n return db.get_data()",
"def get_raw(self):\n return self.settings",
"def get_raw(self):\n return self.settings",
"def get_raw(self):\n return self.settings",
"def get_raw(self):\n return self.settings",
"def getSettings(self):\n return self.cfg",
"def settings(self) -> Optional[pulumi.Input['ConfigurationServiceSettingsArgs']]:\n return pulumi.get(self, \"settings\")",
"def getResolution(self):\n # load it each time, since this setting is not limited to a single user\n projectSettingsDB = self.loadProjectSettings()\n try:\n resolution = projectSettingsDB[\"Resolution\"]\n return resolution\n except KeyError:\n msg = \"Database Error while reading projectSettings.json\"\n logger.error(msg)\n return None",
"def settings(self):\n return self._settings",
"def settings(self):\n return self._settings",
"def requested_config_vals():\n return {'transfer_stats_per_file':'opt'}",
"def settings(self) -> BaseSettings:\n return self._context.settings",
"def settings(self) -> BaseSettings:\n return self._context.settings",
"def ivy_settings(self):\r\n return self._ivy_settings",
"def get_config(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVm_GetConfig', self.handle))",
"def get_srv_config(self):\n\t\treturn Job(SDK.PrlSrv_GetSrvConfig(self.handle)[0])",
"def GetAWSSettings(self):\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/cloud-connect-aws/GetAWSSettings\n FULL_URL = self.base_url+'/cloud-connect-aws/combined/settings/v1'\n HEADERS = self.headers\n result = self.Result()\n try:\n response = requests.request(\"GET\", FULL_URL, headers=HEADERS, verify=False)\n returned = result(response.status_code, response.headers, response.json())\n except Exception as e:\n returned = result(500, {}, str(e))\n \n return returned",
"def get_config(self):\n if self.allow_reco():\n return self.chs_config()\n else:\n return self.get_config_j(self.id)",
"def get_current_bios_settings(self, only_allowed_settings=True):\n\n sushy_system = self._get_sushy_system()\n try:\n current_settings = sushy_system.bios.json\n except sushy.exceptions.SushyError as e:\n msg = (self._('The current BIOS Settings were not found. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n\n attributes = current_settings.get(\"Attributes\")\n return attributes",
"def _get_trs_opts(service_id):\n return trs_config()[service_id]",
"def getreplicationsettings(self):\n d = {}\n try:\n con = hcpsdk.Connection(self.target, debuglevel=self.debuglevel)\n except Exception as e:\n raise hcpsdk.HcpsdkError(str(e))\n else:\n self.connect_time = con.connect_time\n try:\n r = con.GET('/mapi/services/replication')\n except Exception as e:\n raise hcpsdk.HcpsdkError(str(e))\n else:\n if r.status == 200:\n # Good status, get and parse the Response\n x = r.read()\n self.service_time = con.service_time2\n for child in Et.fromstring(x):\n d[child.tag] = child.text\n else:\n raise (hcpsdk.HcpsdkError('{} - {}'.format(r.status, r.reason)))\n finally:\n # noinspection PyUnboundLocalVariable\n con.close()\n\n return d"
] | [
"0.6224379",
"0.5955814",
"0.59340453",
"0.5871603",
"0.5869005",
"0.5833151",
"0.57728857",
"0.57651824",
"0.57078904",
"0.57052916",
"0.5703846",
"0.5703846",
"0.5703846",
"0.5703846",
"0.56948245",
"0.5670498",
"0.5628336",
"0.56175953",
"0.56175953",
"0.5589727",
"0.5574095",
"0.5574095",
"0.5556145",
"0.55392355",
"0.55094105",
"0.5488156",
"0.5481084",
"0.5457733",
"0.54547757",
"0.54328203"
] | 0.7113912 | 0 |
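This is byte-for-byte the BIOS 'Settings' pattern with a different error message; a generic helper could serve both. Sketch only: `rest_get` stands in for the class's `_rest_get`, and the exception types are simplified.

    def get_linked_settings(data, rest_get, name='Settings'):
        # Both _get_iscsi_settings_resource and _get_bios_settings_resource
        # chase data['links'][...]['href'] and GET it.
        try:
            uri = data['links'][name]['href']
        except KeyError:
            raise LookupError('%s resource not found.' % name)
        status, headers, body = rest_get(uri)
        if status != 200:
            raise RuntimeError('GET %s returned status %d' % (uri, status))
        return headers, uri, body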
Get the Boot resource, including BootSources. | def _get_bios_boot_resource(self, data):
try:
boot_uri = data['links']['Boot']['href']
except KeyError:
msg = ('Boot resource not found.')
raise exception.IloCommandNotSupportedError(msg)
status, headers, boot_settings = self._rest_get(boot_uri)
if status != 200:
msg = self._get_extended_error(boot_settings)
raise exception.IloError(msg)
return boot_settings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_persistent_boot_devices(self):\n # Check if the BIOS resource if exists.\n headers_bios, bios_uri, bios_settings = self._check_bios_resource()\n\n # Get the Boot resource.\n boot_settings = self._get_bios_boot_resource(bios_settings)\n\n # Get the BootSources resource\n try:\n boot_sources = boot_settings['BootSources']\n except KeyError:\n msg = (\"BootSources resource not found.\")\n raise exception.IloError(msg)\n\n try:\n boot_order = boot_settings['PersistentBootConfigOrder']\n except KeyError:\n msg = (\"PersistentBootConfigOrder resource not found.\")\n raise exception.IloCommandNotSupportedError(msg)\n\n return boot_sources, boot_order",
"def Sources():\n return _sources",
"def get_resource_loader(self):\n return self.game.resource_loader",
"def resources(self) -> pulumi.Output[Sequence['outputs.MachineExtensionResponse']]:\n return pulumi.get(self, \"resources\")",
"def resources(self):\n return self.__resources",
"def boot_configuration(self):\n bootconfs = self.get_logical_configuration(gdef.BOOT_LOG_CONF)\n if not bootconfs:\n return bootconfs\n assert len(bootconfs) == 1 # Only one boot configuration can exist for each device instance.\n return bootconfs[0]",
"def get_resources():\r\n global __res\r\n if __res == None:\r\n __init_resources()\r\n return __res",
"def get_resources():\r\n global __res\r\n if __res == None:\r\n __init_resources()\r\n return __res",
"def get_resources():\r\n global __res\r\n if __res == None:\r\n __init_resources()\r\n return __res",
"def get_resources():\n global __res\n if __res == None:\n __init_resources()\n return __res",
"def get_resources():\n global __res\n if __res == None:\n __init_resources()\n return __res",
"def get_resources():\n global __res\n if __res == None:\n __init_resources()\n return __res",
"def get_resources(self):\n return []",
"def get_boot_driver(self):\n return self._boot_driver",
"def sources(self) -> Optional[Sequence['outputs.AddressPrefixItemResponse']]:\n return pulumi.get(self, \"sources\")",
"def resource_mapping():\n return {\n 'OS::Heat::ResourceChain': ResourceChain,\n }",
"def resources(self):\n return [self]",
"def getbootinfo(self):\n self.mount()\n kernel = None\n inits = []\n for line in self.xlist(\"get-bootinfo\", IBASE)[1]:\n if line.startswith('+++'):\n kernel = line.split()[1]\n else:\n inits.append(line)\n self.unmount()\n if not inits:\n run_error(_(\"No initramfs found\"))\n return None\n if not kernel:\n run_error(_(\"GRUB problem:\\n\") + inits[0])\n return None\n return (kernel, inits)",
"def sources(self) -> Sequence[Any]:\n return pulumi.get(self, \"sources\")",
"def _get_source_rd(self):\n return self.__source_rd",
"def lookup(self):\r\n return resources.Lookup(self)",
"def source(self) -> XMLResource:\n return self.schema.source",
"def get_bokeh_resources() -> TemplateResourcesData:\n template_resources = TemplateResourcesData()\n template_resources.js = CDN.js_files[0]\n template_resources.css = CDN.css_files[0]\n\n return template_resources",
"def get_resource(self):\n from rowgenerators import parse_app_url # Here, to break an import cycle\n\n self._resource = self._downloader.download(self.inner)\n\n\n ru = parse_app_url(self._resource.sys_path,\n downloader=self.downloader,\n scheme_extension=self.scheme_extension,\n **self.frag_dict)\n\n\n return ru",
"def BootstrapBootstrap (name):\n module = sys.modules [__name__]\n return BootstrapSource (name, inspect.getsource (module), inspect.getsourcefile (module))",
"def getResource(self):\n return self.serviceClass.app.resource()",
"def get_resource(self):\n return self._stores",
"def get_power_source(self):\n self._info(\"get_power_source\")\n response = self.parent.power_manager.get_power_source()\n if response is not None:\n response = response[0]\n self.parent.controller.get_power_source_cb(response)\n return response",
"def sources(self):\n return self._sources",
"def resource_map(self):"
] | [
"0.5812557",
"0.5714252",
"0.5702293",
"0.5645948",
"0.5568216",
"0.5541851",
"0.5470517",
"0.5470517",
"0.5470517",
"0.54481995",
"0.54481995",
"0.54481995",
"0.54442555",
"0.54271823",
"0.5424539",
"0.53723615",
"0.5370766",
"0.5336021",
"0.530553",
"0.5278246",
"0.5265546",
"0.5231954",
"0.5229536",
"0.52242064",
"0.5203322",
"0.51976836",
"0.5185501",
"0.5144898",
"0.51378727",
"0.5126003"
] | 0.5864545 | 0 |
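Downstream code (see the `_get_persistent_boot_devices` negative above) reads exactly two keys out of the returned body; a condensed sketch, with `client` and `bios_settings` assumed to be in scope:

    boot = client._get_bios_boot_resource(bios_settings)
    sources = boot['BootSources']                 # bootable devices
    order = boot['PersistentBootConfigOrder']     # persistent boot order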
Get the Mappings resource. | def _get_bios_mappings_resource(self, data):
try:
map_uri = data['links']['Mappings']['href']
except KeyError:
msg = ('Mappings resource not found.')
raise exception.IloCommandNotSupportedError(msg)
status, headers, map_settings = self._rest_get(map_uri)
if status != 200:
msg = self._get_extended_error(map_settings)
raise exception.IloError(msg)
return map_settings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mapping(self):\n return self._mapping",
"def get_mapping(self, ksf: str) -> InfoResMapping:\n irm = self.InfoResMapping(self, ksf)\n return irm",
"def getMapping(self):\n self._process()\n return self._mapping",
"def mapping(self):\n return self.request('_mapping', pylastica.request.Request.GET).data",
"def resource_map(self):",
"def get_mapping(self):\n if self.role:\n return self.role.get_mapping(self.mapping)\n\n return self.mapping",
"def map( self ) :\n\n self.readMap( )\n\n return( self.__map )",
"def mapping_properties(self) -> pulumi.Output['outputs.ConnectorMappingPropertiesResponse']:\n return pulumi.get(self, \"mapping_properties\")",
"def path_mapping(self) -> Optional[Sequence['outputs.ContentPathMapResponse']]:\n return pulumi.get(self, \"path_mapping\")",
"def get_map(self):\n return self.parent.controller.get_map()",
"def mappings(self) -> pulumi.Output[Optional[Sequence['outputs.TypePropertiesMappingResponse']]]:\n return pulumi.get(self, \"mappings\")",
"def readMap( self ) :\n\n if self.__map is None:\n mapFilePath = pathlib.Path(self.path)\n if not mapFilePath.is_absolute():\n mapFilePath = self.derivedPath / mapFilePath\n self.__map = Map.readXML_file(mapFilePath)\n self.__map.setAncestor(self)\n\n return self.__map",
"def mapped(self):\n return self.__mapped",
"def get(self):\n maps = Map.all()\n results = [map_object.serialize() for map_object in maps]\n return results, status.HTTP_200_OK",
"def get_map(self):\n return self.map",
"def get_map(self):\n return self.get_raw_ys()",
"def request_map():\n\n rospy.loginfo(\"Requesting the map\")\n rospy.wait_for_service('dynamic_map')\n getMap = rospy.ServiceProxy('dynamic_map', GetMap)\n g = getMap().map\n\n return g",
"def get_object_mappings(self):\n self.logger.debug(\"Requesting object mappings\")\n sm = yield self.omap.get_trap_mappings(config.pool)\n if sm != self.source_map:\n self.logger.debug(\"Setting object mappings to: %s\", sm)\n self.source_map = sm",
"def get_map(self) -> list:\n return self.map_obstacle",
"def _get_route_map(self):\n return self.__route_map",
"def get_resources(self):\n return []",
"def get_mapping(self, index):\n url = \"{url_home}/{index}/{function}\".format(url_home=self.url_elastic, index=index, function=\"_mapping\")\n res = rw.get(url, headers=self.headers)\n return res",
"def mapping_properties(self) -> pulumi.Input['ConnectorMappingPropertiesArgs']:\n return pulumi.get(self, \"mapping_properties\")",
"def map(self) -> Map:\n return self._map",
"def schema_mappings(self):\n pass",
"def resources(self):\n return self.__resources",
"def get_map(self):\n return self._locmap",
"def get_current_mappings(self):\n return {name: getattr(self, name) for name in self.__mapped_names}",
"def resources(self):\n return [self]",
"def MAP(self):\n return self.__map"
] | [
"0.673186",
"0.6723476",
"0.67003196",
"0.6668532",
"0.6564718",
"0.6539853",
"0.6527213",
"0.6339044",
"0.63287383",
"0.6280277",
"0.62163085",
"0.60885906",
"0.607769",
"0.6042368",
"0.6038287",
"0.60309374",
"0.60278445",
"0.5881427",
"0.585224",
"0.5841346",
"0.58215725",
"0.57991636",
"0.57975084",
"0.5745911",
"0.57450783",
"0.5743332",
"0.5736136",
"0.5704447",
"0.5702205",
"0.57018113"
] | 0.6955971 | 0 |
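A defensive caller sketch. The top-level key name below is an assumption about the Mappings payload, so inspect a live response before relying on it:

    maps = client._get_bios_mappings_resource(bios_settings)
    # 'BiosPciSettingsMappings' is an assumed key name.
    for entry in maps.get('BiosPciSettingsMappings', []):
        print(entry)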
Checks if PATCH is supported on iSCSI. | def _check_iscsi_rest_patch_allowed(self):
headers, bios_uri, bios_settings = self._check_bios_resource()
# Check if the bios resource exists.
if('links' in bios_settings and 'iScsi' in bios_settings['links']):
iscsi_uri = bios_settings['links']['iScsi']['href']
status, headers, settings = self._rest_get(iscsi_uri)
if status != 200:
msg = self._get_extended_error(settings)
raise exception.IloError(msg)
if not self._operation_allowed(headers, 'PATCH'):
headers, iscsi_uri, settings = (
self._get_iscsi_settings_resource(settings))
self._validate_if_patch_supported(headers, iscsi_uri)
return iscsi_uri
else:
msg = ('"links/iScsi" section in bios'
' does not exist')
raise exception.IloCommandNotSupportedError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _validate_if_patch_supported(self, headers, uri):\n if not self._operation_allowed(headers, 'PATCH'):\n msg = ('PATCH Operation not supported on the resource '\n '\"%s\"' % uri)\n raise exception.IloError(msg)",
"def check_supported_features(self):",
"def test_patch_hyperflex_server_firmware_version(self):\n pass",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def test_guest_os(self):\n self.check_guest_os()",
"def is_ida_version_supported():\n major, minor = map(int, idaapi.get_kernel_version().split(\".\"))\n if major >= 7:\n return True\n print(\"GhIDA:: [!] IDA Pro 7.xx supported only\")\n return False",
"def verify_support():\n ostype, majorrelease, _ = get_os_release_data()\n if ostype not in _supported_os:\n _logger.info('OS type %s is not supported.', ostype)\n return False\n if majorrelease not in _supported_release:\n _logger.info('OS %s %s is not supported', ostype, majorrelease)\n return False\n return True",
"def is_ctu_capable():\n\n context = package_context.get_context()\n ctu_func_map_cmd = context.ctu_func_map_cmd\n try:\n version = subprocess.check_output([ctu_func_map_cmd, '-version'])\n except (subprocess.CalledProcessError, OSError):\n version = 'ERROR'\n return version != 'ERROR'",
"def test_patch_hyperflex_capability_info(self):\n pass",
"def testCheckAvailable(self):\n img = self.img\n img.inspect()\n with converter.RootMounted(img.converter._h,\n '/dev/VolGroup00/LogVol00'):\n c = img.converter\n installer = redhat.LocalInstaller(\n c._h, '/dev/VolGroup00/LogVol00',\n db.DB(['{}/conf/guestconv.db'.format(env.topdir)]),\n log.get_logger_object(test_helper.logger)\n )\n\n kernel = redhat.Package('kernel',\n version='2.6.9', release='89.EL',\n arch='i686')\n self.assertTrue(installer.check_available([kernel]))",
"def supported():\n return os.path.isfile(OPENCOR)",
"def is_system_usable_block_device(pydev_device):\n if pydev_device.get(\"ID_BUS\") == \"usb\":\n # Skip USB devices\n return False\n if pydev_device.get(\"DM_VG_NAME\") or pydev_device.get(\"DM_LV_NAME\"):\n # Skip LVM devices\n return False\n if constants.DEVICE_NAME_MPATH in pydev_device.get(\"DM_NAME\", \"\") and pydev_device.get(\"DM_PART\", \"\"):\n # Skip mpath partition devices\n return False\n if pydev_device.get(\"ID_FS_TYPE\") == constants.DEVICE_FS_TYPE_MPATH:\n # Skip mpath member devices\n return False\n id_path = pydev_device.get(\"ID_PATH\", \"\")\n if \"iqn.\" in id_path or \"eui.\" in id_path:\n # Skip all iSCSI devices, they are links for volume storage.\n # As per https://www.ietf.org/rfc/rfc3721.txt, \"iqn.\" or \"edu.\"\n # have to be present when constructing iSCSI names.\n return False\n if ((\"-fc-\" in id_path or \"-lun-\" in id_path) and\n is_valid_multipath(pydev_device.get('DEVNAME'))):\n return False\n if pydev_device.get(\"ID_VENDOR\") == constants.VENDOR_ID_LIO:\n # LIO devices are iSCSI, should be skipped above!\n LOG.error(\"Invalid id_path. Device %s (%s) is iSCSI!\" %\n (id_path, pydev_device.get('DEVNAME')))\n return False\n return True",
"def _check_patch_requirements(region_name,\n applied_patches=None,\n available_patches=None):\n\n api_token = None\n if applied_patches:\n patches_applied = patch_api.patch_is_applied(\n token=api_token,\n timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,\n region_name=region_name,\n patches=applied_patches\n )\n if not patches_applied:\n raise wsme.exc.ClientSideError(_(\n \"The following patches must be applied before doing \"\n \"the kubernetes upgrade: %s\" % applied_patches))\n\n if available_patches:\n patches_available = patch_api.patch_is_available(\n token=api_token,\n timeout=constants.PATCH_DEFAULT_TIMEOUT_IN_SECS,\n region_name=region_name,\n patches=available_patches\n )\n if not patches_available:\n raise wsme.exc.ClientSideError(_(\n \"The following patches must be available before doing \"\n \"the kubernetes upgrade: %s\" %\n available_patches))",
"def is_supported(self) -> bool:\n\n # TODO logging ?\n # TODO ICMP error if ttl is zero\n return self._version == 4 and self._ihl >= 5 and self._ttl != 0",
"def test_patch_pci_switch(self):\n pass",
"def requirements():\n if fabric.api.sudo(\"grep 'release 7' /etc/redhat-release\",quiet=True).succeeded:\n \tprint blue(\"This is a Centos/RedHat 7 server. Please install AIDE.\")\n \treturn 1\n if not rpm_is_installed('glibc.*i686'):\n print red(\"GlibC i686 is not installed\")\n if not file_exists(\"/usr/local/tripwire/tfs/bin/tripwire\", use_sudo=True):\n print red(\"Tripwire is not installed\")"
] | [
"0.6394003",
"0.6082379",
"0.5752724",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.57124674",
"0.5584858",
"0.55626357",
"0.55101615",
"0.5490769",
"0.547495",
"0.54601276",
"0.54075706",
"0.5383899",
"0.53760725",
"0.53601485",
"0.5349725"
] | 0.7114112 | 0 |
Change secure boot settings on the server. | def _change_secure_boot_settings(self, property, value):
system = self._get_host_details()
# find the BIOS URI
if ('links' not in system['Oem']['Hp'] or
'SecureBoot' not in system['Oem']['Hp']['links']):
msg = (' "SecureBoot" resource or feature is not '
'supported on this system')
raise exception.IloCommandNotSupportedError(msg)
secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']
# Change the property required
new_secure_boot_settings = {}
new_secure_boot_settings[property] = value
# perform the patch
status, headers, response = self._rest_patch(
secure_boot_uri, None, new_secure_boot_settings)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg)
# Change the bios setting as a workaround to enable secure boot
# Can be removed when fixed for Gen9 snap2
val = self._get_bios_setting('CustomPostMessage')
val = val.rstrip() if val.endswith(" ") else val+" "
self._change_bios_setting({'CustomPostMessage': val}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_secure_boot_mode(self, secure_boot_enable):\n sushy_system = self._get_sushy_system()\n try:\n sushy_system.secure_boot.enable_secure_boot(secure_boot_enable)\n except exception.InvalidInputError as e:\n msg = (self._('Invalid input. Error %(error)s')\n % {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to set secure '\n 'boot settings on the server. Error: %(error)s')\n % {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)",
"def set_secure_boot_state(self, task, state):\n return irmc_common.set_secure_boot_mode(task.node, state)",
"def set_secure_boot_mode(self, secure_boot_enable):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('SecureBootEnable',\n secure_boot_enable)\n else:\n msg = ('System is not in UEFI boot mode. \"SecureBoot\" related '\n 'resources cannot be changed.')\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def reset_secure_boot_keys(self):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('ResetToDefaultKeys', True)\n else:\n msg = ('System is not in UEFI boot mode. \"SecureBoot\" related '\n 'resources cannot be changed.')\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_secure_boot\")",
"def setBootargs(self):\n\t\tif self.testType == 'auto' or self.testType == 'manual':\n\t\t\tself.bootargs = self.settings.getKeyValue('nfs.fs')\n\t\t\tself.bootargs = self.bootargs.replace('<nfsroot>', self.nfsroot)\n\t\t\tself.bootargs = self.bootargs.replace('<ipaddr>', self.ipaddr)\n\t\t\tself.socket.send('setenv bootargs ' + self.bootargs+'\\r', 1)\n\t\telse:\n\t\t\tself.bootargs = self.settings.getKeyValue('ramdisk.fs')\n\t\t\tself.bootargs = self.bootargs.replace('<ipaddr>', self.ipaddr)\n\t\t\tself.socket.send('setenv bootargs ' + self.bootargs+'\\r', 1)\t\t\n\t\treturn None",
"def get_secure_boot_mode(self):\n sushy_system = self._get_sushy_system()\n try:\n secure_boot_enabled = GET_SECUREBOOT_CURRENT_BOOT_MAP.get(\n sushy_system.secure_boot.current_boot)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to provide '\n 'information about secure boot on the server. '\n 'Error: %(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexCommandNotSupportedError(msg)\n\n if secure_boot_enabled:\n LOG.debug(self._(\"Secure boot is Enabled\"))\n else:\n LOG.debug(self._(\"Secure boot is Disabled\"))\n return secure_boot_enabled",
"def start_salt():\n with fabric_settings(warn_only=True):\n if env.host == env.master_server.public_ip:\n sudo(\"systemctl start salt-master\")\n time.sleep(3)\n sudo(\"systemctl start salt-minion\")",
"def system_protection_config():\n\n\tprint_section_header(\"GENERAL SYSTEM PROTECTION\", Fore.BLUE)\n\n\t# Enable Gatekeeper\n\tif prompt_yes_no(top_line=\"-> Enable Gatekeeper?\",\n\t bottom_line=\"Defend against malware by enforcing code signing and verifying downloaded applications before letting them to run.\"):\n\t\tprint_confirmation(\"Enabling Gatekeeper...\")\n\t\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\t\tsp.run('sudo spctl --enable --label \"Developer ID\"', shell=True, stdout=sp.PIPE)\n\n\t# Disable automatic software whitelisting\n\tif prompt_yes_no(top_line=\"-> Prevent automatic software whitelisting?\",\n\t bottom_line=\"Both built-in and downloaded software will require user approval for whitelisting.\"):\n\t\tprint_confirmation(\"Preventing automatic whitelisting...\")\n\t\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\t\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\n\t# Captive Portal\n\tif prompt_yes_no(top_line=\"-> Disable Captive Portal Assistant and force login through browser on untrusted networks?\",\n\t bottom_line=\"Captive Portal could be triggered and direct you to a malicious site WITHOUT any user interaction.\"):\n\t\tprint_confirmation(\"Disabling Captive Portal Assistant...\")\n\t\tsp.run(['sudo', 'defaults', 'write', '/Library/Preferences/SystemConfiguration/com.apple.captive.control', 'Active', '-bool', 'false'], stdout=sp.PIPE)",
"def setprivileged(miner: Miner, login, allowsetting):\n commands = get_changeconfigcommands(getminerfilename(miner), 'api-allow', allowsetting)\n sendcommands_and_restart(miner, login, commands)",
"def set_power(sid):\n # Resolve the passed parameters if any\n timer = None\n os = None\n if request.json:\n if timer in request.json:\n timer = request.json.get('timer')\n if os in request.json:\n os = request.json.get('os')\n db = sqlite3.connect('/home/tropius/TROPIUS/TROPIUS.db')\n state = hosts.get(db, sid)['state']\n \n if state == 'on':\n # The host is on -- turn it off\n # TODO make a unix shell util file\n # TODO make a windows util file\n return\n elif state == 'off':\n # The host is off -- turn it on\n if timer is not None:\n sleep(timer)\n netutil.wake_on_lan(db, sid)\n ret = {'power': {'state': 'on'}}\n return jsonify(ret)\n # TODO find a keyboard driver and implement OS parameter",
"async def _hardcore_setheist(self, ctx):\r\n guild = ctx.guild\r\n config = await self.thief.get_guild_settings(guild)\r\n\r\n if config[\"Hardcore\"]:\r\n config[\"Hardcore\"] = False\r\n msg = \"Hardcore mode now OFF.\"\r\n else:\r\n config[\"Hardcore\"] = True\r\n msg = \"Hardcore mode now ON! **Warning** death will result in credit **and chip wipe**.\"\r\n await self.thief.config.guild(guild).Config.set(config)\r\n await ctx.send(msg)",
"def test_update_bios_boot_mode(self):\n pass",
"def set_password(self, system):\n if system[\"embedded_available\"] and system[\"controller_addresses\"]:\n for url in [\"https://%s:8443/devmgr\" % system[\"controller_addresses\"][0],\n \"https://%s:443/devmgr\" % system[\"controller_addresses\"][0],\n \"http://%s:8080/devmgr\" % system[\"controller_addresses\"][0]]:\n try:\n rc, response = self._request(\"%s/utils/login?uid=admin&xsrf=false&onlycheck=true\" % url, ignore_errors=True, url_username=\"admin\",\n url_password=\"\", validate_certs=False)\n\n if rc == 200: # successful login without password\n system[\"password_set\"] = False\n if system[\"password\"]:\n try:\n rc, storage_system = self._request(\"%s/v2/storage-systems/1/passwords\" % url, method=\"POST\", url_username=\"admin\",\n headers=self.DEFAULT_HEADERS, url_password=\"\", validate_certs=False,\n data=json.dumps({\"currentAdminPassword\": \"\", \"adminPassword\": True,\n \"newPassword\": system[\"password\"]}))\n\n except Exception as error:\n system[\"failed\"] = True\n self.module.warn(\"Failed to set storage system password. Array [%s].\" % system[\"ssid\"])\n break\n\n elif rc == 401: # unauthorized\n system[\"password_set\"] = True\n break\n except Exception as error:\n pass\n else:\n self.module.warn(\"Failed to retrieve array password state. Array [%s].\" % system[\"ssid\"])\n system[\"failed\"] = True",
"def lockdown_procedure():\n\tprint(\"----------\")\n\tprint_section_header(\"LOCKDOWN\", Fore.BLUE)\n\tprint_confirmation(\"Set secure configuration without user interaction.\")\n\n\t# Get sudo priv\n\tsp.run(\"sudo -E -v\", shell=True, stdout=sp.PIPE)\n\n\t####\n\t# FIREWALL\n\t####\n\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchDaemons/com.apple.alf.agent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'launchctl', 'load', '/System/Library/LaunchAgents/com.apple.alf.useragent.plist'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setglobalstate', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setloggingmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setstealthmode', 'on'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', 'pkill', '-HUP', 'socketfilterfw'], stdout=sp.PIPE)\n\n\t####\n\t# SYSTEM PROTECTION\n\t####\n\n\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\tsp.run(['sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.captive.control Active -bool false'], stdout=sp.PIPE)\n\n\t####\n\t# METADATA STORAGE\n\t####\n\n\tsp.run(['rm', '-rfv', '\"~/Library/LanguageModeling/*\"', '\"~/Library/Spelling/*\"', '\"~/Library/Suggestions/*\"'])\n\tsp.run(['rm', '-rfv', '\"~/Library/Application Support/Quick Look/*\"'], stdout=sp.PIPE)\n\tsp.run([':>~/Library/Preferences/com.apple.LaunchServices.QuarantineEventsV2'], shell=True, stdout=sp.PIPE)\n\n\t####\n\t# USER SAFETY\n\t####\n\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPassword', '-int', '1'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.screensaver', 'askForPasswordDelay', '-int', '0'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'AppleShowAllExtensions', '-bool', 'true'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'NSGlobalDomain', 'NSDocumentSaveNewDocumentsToCloud', '-bool', 'false'], stdout=sp.PIPE)\n\tsp.run(['defaults', 'write', 'com.apple.finder', 'AppleShowAllFiles', '-boolean', 'true'], shell=True, stdout=sp.PIPE)\n\tsp.run(['killAll', 'Finder'], stdout=sp.PIPE)\n\n\t####\n\t# RESTART\n\t####\n\n\tfinal_configuration()",
"def get_secure_boot_mode(self):\n system = self._get_host_details()\n\n if ('links' not in system['Oem']['Hp'] or\n 'SecureBoot' not in system['Oem']['Hp']['links']):\n msg = ('\"SecureBoot\" resource or feature is not supported'\n ' on this system')\n raise exception.IloCommandNotSupportedError(msg)\n\n secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']\n\n # get the Secure Boot object\n status, headers, secure_boot_settings = self._rest_get(secure_boot_uri)\n\n if status >= 300:\n msg = self._get_extended_error(secure_boot_settings)\n raise exception.IloError(msg)\n\n return secure_boot_settings['SecureBootCurrentState']",
"def safe_boot_disabled(self, safe_boot_disabled):\n\n self._safe_boot_disabled = safe_boot_disabled",
"def boot(self, boot):\n\n self._boot = boot",
"def test_patch_bios_boot_mode(self):\n pass",
"def reboot_fpga(self):\n log.info(\"Booting FPGA from SPI prom\")\n self.set(\"FPGA_CTRL\", \"boot_fpga\", 1);",
"def __secure_boot(efivars_dir):\n enabled = False\n sboot = glob.glob(os.path.join(efivars_dir, \"SecureBoot-*/data\"))\n if len(sboot) == 1:\n # The minion is usually running as a privileged user, but is\n # not the case for the master. Seems that the master can also\n # pick the grains, and this file can only be readed by \"root\"\n try:\n with salt.utils.files.fopen(sboot[0], \"rb\") as fd:\n enabled = fd.read()[-1:] == b\"\\x01\"\n except PermissionError:\n pass\n return enabled",
"def libc_prctl_set_securebits():\n # straight from man capabilities(7):\n # \"An application can use the following call to lock itself, and all of\n # its descendants, into an environment where the only way of gaining\n # capabilities is by executing a program with associated file capabilities\"\n _call_c_style(\n libc,\n \"prctl\",\n PR_SET_SECUREBITS,\n (\n SECBIT_KEEP_CAPS_LOCKED\n | SECBIT_NO_SETUID_FIXUP\n | SECBIT_NO_SETUID_FIXUP_LOCKED\n | SECBIT_NOROOT\n | SECBIT_NOROOT_LOCKED\n ),\n 0,\n 0,\n 0,\n )",
"def init_settings(self):\n self.app.config.setdefault('SIMPLE_DOMAINS', [])\n self.app.config.setdefault('AWS_ACCESS_KEY_ID', environ.get('AWS_ACCESS_KEY_ID'))\n self.app.config.setdefault('AWS_SECRET_ACCESS_KEY', environ.get('AWS_SECRET_ACCESS_KEY'))\n self.app.config.setdefault('AWS_REGION', environ.get('AWS_REGION', self.DEFAULT_REGION))",
"def set_pending_boot_mode(self, boot_mode):\n boot_mode = boot_mode.lower()\n if boot_mode not in ['uefi', 'legacy']:\n msg = 'Invalid Boot mode specified'\n raise exception.IloInvalidInputError(msg)\n\n boot_properties = {'BootMode': boot_mode}\n\n if boot_mode == 'legacy':\n boot_properties['BootMode'] = 'LegacyBios'\n else:\n # If Boot Mode is 'Uefi' set the UEFIOptimizedBoot first.\n boot_properties['UefiOptimizedBoot'] = \"Enabled\"\n\n # Change the Boot Mode\n self._change_bios_setting(boot_properties)",
"def boot_config():\n # quick check to grab a config file from /boot partition.\n # this function helps users who cannot SSH/access the Pi,\n # but can access the microSD card\n if os.path.exists(BOOT_CONFIG_PATH):\n print(\"Configuration loaded from /boot directory.\")\n with open(BOOT_CONFIG_PATH) as boot_file:\n with open(CONFIG_FILE_PATH, 'w+') as config_file:\n for line in boot_file:\n config_file.write(line)",
"def boot(self):\n\n pass",
"def set_bootloader_mode(self, mode):\n self.check_validity()\n\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 9, 'B')",
"def setrestricted(miner: Miner, login, allowsetting):\n commands = get_changeconfigcommands(getminerfilename(miner), 'api-allow', allowsetting)\n sendcommands_and_restart(miner, login, commands)",
"def _editSysconfig():\n dbUrl = \"jdbc:postgresql://\" + getDbHostName() + \":\" + getDbPort() + \"/\" + basedefs.DB_NAME\n if \"DB_SECURE_CONNECTION\" in controller.CONF.keys() and controller.CONF[\"DB_SECURE_CONNECTION\"] == \"yes\":\n dbUrl = dbUrl + \"?ssl=true&sslfactory=org.postgresql.ssl.NonValidatingFactory\"\n\n proxyEnabled = utils.compareStrIgnoreCase(controller.CONF[\"OVERRIDE_HTTPD_CONFIG\"], \"yes\")\n utils.editEngineSysconfig(proxyEnabled=proxyEnabled,\n dbUrl=dbUrl,\n dbUser=utils.getDbUser(),\n fqdn=controller.CONF[\"HOST_FQDN\"],\n http=controller.CONF[\"HTTP_PORT\"],\n https=controller.CONF[\"HTTPS_PORT\"],\n javaHome=controller.CONF[\"JAVA_HOME\"])",
"def __fill_boot_settings_fields(profile, profile_elements):\n result = True\n selenium2lib = ui_lib.get_s2l()\n # Validate the profile in XML file\n __validate_boot_settings_properties_in_xml_file(profile)\n # If XML is fine, go ahead filling Boot Setting UI fields\n result &= ui_lib.wait_for_element_and_click(profile_elements.ID_COMBO_MENU_VIEW)\n result &= ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_BOOTSETTINGS,\n PerfConstants.DEFAULT_SYNC_TIME)\n ui_lib.wait_for_element_visible(profile_elements.ID_CHKBOX_MANAGE_BOOT)\n if profile.has_property(XML_MANAGE_BOOT_MODE_ATTRIBUTE) and profile.manageBoot == \"false\":\n result &= ui_lib.wait_for_checkbox_and_unselect(profile_elements.ID_CHKBOX_MANAGE_BOOT)\n elif profile.has_property(XML_BOOT_MODE_ATTRIBUTE):\n boot_mode_option = profile.bootMode\n logger._log_to_console_and_log_file(\" --> Selecting Boot Mode..\")\n __select_value_from_a_profile_combo_box(profile_elements.ID_COMBO_PROFILE_BOOT_MODE, profile_elements.ID_COMBO_PROFILE_BOOT_MODE_LIST % boot_mode_option)\n if boot_mode_option == CONSTANT_UEFI or boot_mode_option == CONSTANT_UEFI_OPTIMIZED:\n if profile.has_property(XML_BOOT_POLICY_ATTRIBUTE):\n boot_policy_option = profile.bootPolicy\n result &= __select_value_from_a_profile_combo_box(profile_elements.ID_COMBO_PROFILE_PXE_BOOT_POLICY, profile_elements.ID_COMBO_PROFILE_PXE_BOOT_POLICY_LIST % boot_policy_option)\n result &= ui_lib.wait_for_element_visible(profile_elements.ID_CHKBOX_PROFILE_BOOT_ORDER)\n if profile.has_property(XML_MANAGE_BOOT_ORDER_ATTRIBUTE) and profile.manageBootOrder == \"false\":\n selenium2lib.unselect_checkbox(profile_elements.ID_CHKBOX_PROFILE_BOOT_ORDER)\n else:\n selenium2lib.select_checkbox(profile_elements.ID_CHKBOX_PROFILE_BOOT_ORDER)\n # Set primary boot device\n if profile.has_property(XML_PRIMARY_BOOT_DEVICE):\n primary_boot_device = profile.primaryBootDevice\n result &= __select_value_from_a_profile_combo_box(profile_elements.ID_COMBO_PROFILE_PRIMARY_BOOT_DEVICE, profile_elements.ID_COMBO_PROFILE_PRIMARY_BOOT_DEVICE_LIST % primary_boot_device)\n elif boot_mode_option == CONSTANT_LEGACY_BIOS:\n __fill_boot_order(profile, profile_elements)\n else:\n __fill_boot_order(profile, profile_elements)\n return result"
] | [
"0.7178006",
"0.69563264",
"0.6825512",
"0.68102604",
"0.6628108",
"0.61587363",
"0.59844947",
"0.58227813",
"0.57520616",
"0.5736573",
"0.5724402",
"0.5678746",
"0.56520265",
"0.56394017",
"0.5624491",
"0.5624137",
"0.56189376",
"0.5582487",
"0.55506265",
"0.5549751",
"0.55264896",
"0.55135757",
"0.5503427",
"0.5483059",
"0.5471922",
"0.54675525",
"0.54548615",
"0.5436217",
"0.5388359",
"0.53854364"
] | 0.72850573 | 0 |
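The positive above wraps a single REST PATCH plus a BIOS-setting workaround. A minimal stand-alone sketch of just the PATCH step, assuming the requests library and a hypothetical iLO address and credentials; only the resource path and payload shape are taken from the snippet:

import requests

def patch_secure_boot(host, auth, prop, value):
    # PATCH one property on the HP SecureBoot resource; in the positive
    # the URI is discovered from system['Oem']['Hp']['links'] rather
    # than hard-coded as it is here.
    uri = 'https://%s/rest/v1/Systems/1/SecureBoot' % host
    resp = requests.patch(uri, json={prop: value}, auth=auth, verify=False)
    if resp.status_code >= 300:
        raise RuntimeError('iLO error %d: %s' % (resp.status_code, resp.text))

patch_secure_boot('10.0.0.5', ('admin', 'password'), 'SecureBootEnable', True)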
Checks if the system is in UEFI boot mode. | def _is_boot_mode_uefi(self):
boot_mode = self.get_current_boot_mode()
if boot_mode == 'UEFI':
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_bootable(self):\n return self.bootable_flag == 0x80",
"def has_efi():\n return os.path.exists(\"/sys/firmware/efi\")",
"def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode",
"def is_booted(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def test_update_bios_boot_mode(self):\n pass",
"def get_boot_mode(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def pilotIsBootValid (self):\n return self.isBootValid()",
"def test_patch_bios_boot_mode(self):\n pass",
"def is_http_boot_requested(node):\n http_boot_requested = (\n str(node.driver_info.get('enable_uefi_httpboot', 'false')).lower())\n return http_boot_requested == 'true'",
"def non_root_available(self):\n return self._adb_available and self._dev_emu",
"def is_in_use(self):\n\t\treturn bool(call_sdk_function('PrlBootDev_IsInUse', self.handle))",
"def system_valid(self):\n return self.udev.devices_exist",
"def test_get_bios_boot_mode_by_moid(self):\n pass",
"def CheckBoot(self, instance):\n try:\n serial_out = self.GetSerialPortOutput(instance=instance, port=1)\n self.CheckBootFailure(serial_out, instance)\n return ((self.BOOT_COMPLETED_MSG in serial_out)\n or (self.BOOT_STARTED_MSG in serial_out))\n except errors.HttpError as e:\n if e.code == 400:\n logger.debug(\"CheckBoot: Instance is not ready yet %s\", str(e))\n return False\n raise",
"def check_fw_mode(self, cat_cpuinfo_out):\n for line in cat_cpuinfo_out.splitlines():\n if \"firmware\" in line:\n if \"OPAL\" in line:\n return True\n else:\n return False\n return False",
"def sstbf_enabled():\n return common.SSTBF_CAP in SYSTEM_CAPS",
"def test_get_bios_boot_mode_list(self):\n pass",
"def safe_boot_disabled(self):\n return self._safe_boot_disabled",
"def is_boot_code_present(self):\n\n\t\treturn struct.unpack('<H', self.boot_sector_data[0 : 2])[0] != 0 and struct.unpack('<H', self.boot_sector_data[510 : 512])[0] == 0xAA55",
"def get_pending_boot_mode(self):\n headers, uri, bios_settings = self._check_bios_resource(['BootMode'])\n _, _, settings = self._get_bios_settings_resource(bios_settings)\n boot_mode = settings.get('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n return boot_mode.upper()",
"def is_system(self) -> bool:",
"def is_allow_select_boot_device(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsAllowSelectBootDevice', self.handle))",
"def available(self):\n return self._adb_available and self._dev_emu and (self._is_root\n or self._is_su)",
"def pilotValidateBoot (self):\n return self.validateBoot()",
"def is_sys(self):\n if self.mountpoint is not None and self.mountpoint in ['/', '/boot']:\n return True\n return False",
"def is_sys(self):\n if self.mountpoint is not None and self.mountpoint in ['/', '/boot']:\n return True\n return False",
"def __verify__(cls):\n\n try:\n UpstartSystem()\n return True\n except Exception as e:\n try:\n UpstartSystem(bus=DirectUpstartBus())\n return True\n except Exception as e:\n return False",
"def CheckBootFailure(self, serial_out, instance):\n pass",
"def is_system(self) -> undefined.UndefinedOr[bool]:",
"def check_kernel_module(params) -> None:\n if os.system(\"lsmod | grep v4l2loopback >/dev/null 2>&1\") == 0:\n print(\"Kernel module is loaded\")\n else:\n print(\"Kernel module is NOT loaded\")"
] | [
"0.7057053",
"0.69969964",
"0.6908815",
"0.6746094",
"0.67178434",
"0.6590464",
"0.6447706",
"0.64065146",
"0.6263757",
"0.6129953",
"0.61029017",
"0.6102866",
"0.6059007",
"0.6056331",
"0.6024979",
"0.5940045",
"0.5927244",
"0.590934",
"0.585367",
"0.5818194",
"0.58100754",
"0.57906264",
"0.5783992",
"0.57787377",
"0.5778638",
"0.5778638",
"0.5736396",
"0.56873864",
"0.56853974",
"0.56681496"
] | 0.8531207 | 0 |
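For comparison with the BMC-side check above, the in-band probe from inside a Linux host is just a filesystem test: the kernel exposes /sys/firmware/efi only when it was started by UEFI firmware (the has_efi negative relies on the same fact). A sketch:

import os

def booted_via_uefi():
    # EFI variables are available only on UEFI boots, so the directory
    # doubles as a boot-mode indicator.
    return os.path.isdir('/sys/firmware/efi')

print('UEFI' if booted_via_uefi() else 'Legacy BIOS')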
Get the status of secure boot. | def get_secure_boot_mode(self):
system = self._get_host_details()
if ('links' not in system['Oem']['Hp'] or
'SecureBoot' not in system['Oem']['Hp']['links']):
msg = ('"SecureBoot" resource or feature is not supported'
' on this system')
raise exception.IloCommandNotSupportedError(msg)
secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']
# get the Secure Boot object
status, headers, secure_boot_settings = self._rest_get(secure_boot_uri)
if status >= 300:
msg = self._get_extended_error(secure_boot_settings)
raise exception.IloError(msg)
return secure_boot_settings['SecureBootCurrentState'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_secure_boot_mode(self):\n sushy_system = self._get_sushy_system()\n try:\n secure_boot_enabled = GET_SECUREBOOT_CURRENT_BOOT_MAP.get(\n sushy_system.secure_boot.current_boot)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to provide '\n 'information about secure boot on the server. '\n 'Error: %(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexCommandNotSupportedError(msg)\n\n if secure_boot_enabled:\n LOG.debug(self._(\"Secure boot is Enabled\"))\n else:\n LOG.debug(self._(\"Secure boot is Disabled\"))\n return secure_boot_enabled",
"def get_secure_boot_state(self, task):\n return irmc_common.get_secure_boot_mode(task.node)",
"def enable_secure_boot(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enable_secure_boot\")",
"def status(self):\n ret = self.dev.ctrl_transfer(0xc0, 0x01, 0x0081, 0x0000, 0x0001)\n if ret[0] == 0xa0:\n return self.POWER_ON\n return self.POWER_OFF",
"def get_status(self):\n return self.o.read_register(self.dev_id, STATUS)",
"def get_bootvar(self):\n module = 'bootimage/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n bootdefault = response.json()['bootimage']['oper']['hd-default']\n print(self.device + ' The device is set to boot from: ' + bootdefault + ' in the future')\n return bootdefault",
"def get_status(self):\n return self.read_register(259, 0, 3)",
"def wait_boot(self) -> int:\n return self._data[ATTR_WAIT_BOOT]",
"def safe_boot_disabled(self):\n return self._safe_boot_disabled",
"def _check_status(self):\n self.system_status_lock.acquire()\n info = self.system_status_proxy._getvalue()\n self.system_status_lock.release()\n return info",
"def set_secure_boot_state(self, task, state):\n return irmc_common.set_secure_boot_mode(task.node, state)",
"def get_pending_boot_mode(self):\n headers, uri, bios_settings = self._check_bios_resource(['BootMode'])\n _, _, settings = self._get_bios_settings_resource(bios_settings)\n boot_mode = settings.get('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n return boot_mode.upper()",
"async def get_status(self) -> str:\n return await self.hw_device.status()",
"def status(self):\n return self.microblaze.state",
"def __secure_boot(efivars_dir):\n enabled = False\n sboot = glob.glob(os.path.join(efivars_dir, \"SecureBoot-*/data\"))\n if len(sboot) == 1:\n # The minion is usually running as a privileged user, but is\n # not the case for the master. Seems that the master can also\n # pick the grains, and this file can only be readed by \"root\"\n try:\n with salt.utils.files.fopen(sboot[0], \"rb\") as fd:\n enabled = fd.read()[-1:] == b\"\\x01\"\n except PermissionError:\n pass\n return enabled",
"def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-able\n self.vm.wait_ready()\n status = self.vm.run_command(\"ctool status\", indent=0, prefix='')\n return status.strip()",
"def is_bootable(self):\n return self.bootable_flag == 0x80",
"def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode",
"def _get_SS_State(self):\r\n try :\r\n state = win32gui.SystemParametersInfo(win32con.SPI_GETSCREENSAVEACTIVE)\r\n return state\r\n except:\r\n self.__error = True\r\n return False",
"def get_status(self):\n\n return self._system",
"def check_reboot_in_progress(con):\n k, v = con.kv.get(\"service/rebootmgr/reboot_in_progress\")\n if v and \"Value\" in v.keys() and v[\"Value\"]:\n return v[\"Value\"].decode()\n return False",
"def get_status():\n return ('off', 'off')",
"def status(self):\n self.scion_sh('status')",
"def status():\n\n\treturn libcrypto.RAND_status()",
"def get_one_time_boot(self):\n system = self._get_host_details()\n try:\n if system['Boot']['BootSourceOverrideEnabled'] == 'Once':\n device = system['Boot']['BootSourceOverrideTarget']\n if device in DEVICE_RIS_TO_COMMON:\n return DEVICE_RIS_TO_COMMON[device]\n return device\n else:\n # value returned by RIBCL if one-time boot setting are absent\n return 'Normal'\n\n except KeyError as e:\n msg = \"get_one_time_boot failed with the KeyError:%s\"\n raise exception.IloError((msg) % e)",
"def get_host_power_status(self):\n sushy_system = self._get_sushy_system()\n return GET_POWER_STATE_MAP.get(sushy_system.power_state)",
"def status(self):\n if self.qemu.is_running():\n status = 0\n self.log.info(\"vm-status\", result=\"online\")\n for device in list(self.qemu.block_info().values()):\n self.log.info(\n \"disk-throttle\",\n device=device[\"device\"],\n iops=device[\"inserted\"][\"iops\"],\n )\n else:\n status = 1\n self.log.info(\"vm-status\", result=\"offline\")\n for volume in self.ceph.volumes:\n locker = volume.lock_status()\n self.log.info(\"rbd-status\", volume=volume.fullname, locker=locker)\n consul = locate_live_service(self.consul, \"qemu-\" + self.name)\n if consul:\n self.log.info(\n \"consul\", service=consul[\"Service\"], address=consul[\"Address\"]\n )\n else:\n self.log.info(\"consul\", service=\"<not registered>\")\n return status",
"def CheckBoot(self, instance):\n try:\n serial_out = self.GetSerialPortOutput(instance=instance, port=1)\n self.CheckBootFailure(serial_out, instance)\n return ((self.BOOT_COMPLETED_MSG in serial_out)\n or (self.BOOT_STARTED_MSG in serial_out))\n except errors.HttpError as e:\n if e.code == 400:\n logger.debug(\"CheckBoot: Instance is not ready yet %s\", str(e))\n return False\n raise",
"def __call__(self):\n status = self.os.popen('circusctl status monitor').read().strip()\n\n if status == 'active':\n return True\n elif status == 'stopped':\n return False",
"def boot(self):\n\t\tmesslen, received = self.socket.send('bootm\\r', 25)\t\t\n\t\treturn None"
] | [
"0.7545377",
"0.73338354",
"0.68596",
"0.6580167",
"0.6284584",
"0.627462",
"0.62408715",
"0.6210762",
"0.6183254",
"0.61830187",
"0.61508465",
"0.61405003",
"0.61379737",
"0.6085388",
"0.6051852",
"0.6020663",
"0.6020096",
"0.6004745",
"0.59862554",
"0.5939429",
"0.58935285",
"0.5873183",
"0.5869869",
"0.58692807",
"0.58469284",
"0.58386815",
"0.58363783",
"0.58091223",
"0.58007264",
"0.57963437"
] | 0.7864809 | 0 |
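The positive follows a discover-then-GET pattern: read the system resource, find the SecureBoot link under the HP OEM section, then GET it. The same flow sketched with plain requests (host, credentials and TLS handling are assumptions):

import requests

def secure_boot_state(host, auth):
    base = 'https://%s' % host
    system = requests.get(base + '/rest/v1/Systems/1', auth=auth, verify=False).json()
    links = system.get('Oem', {}).get('Hp', {}).get('links', {})
    if 'SecureBoot' not in links:
        raise NotImplementedError('SecureBoot resource not supported on this system')
    sb = requests.get(base + links['SecureBoot']['href'], auth=auth, verify=False).json()
    return sb['SecureBootCurrentState']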
Reset secure boot keys to manufacturing defaults. | def reset_secure_boot_keys(self):
if self._is_boot_mode_uefi():
self._change_secure_boot_settings('ResetToDefaultKeys', True)
else:
msg = ('System is not in UEFI boot mode. "SecureBoot" related '
'resources cannot be changed.')
raise exception.IloCommandNotSupportedInBiosError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! All OATH credentials have been cleared from your YubiKey.')",
"def reset(ctx, force):\n\n force or click.confirm(\n \"WARNING! This will delete all stored OATH accounts and restore factory \"\n \"settings. Proceed?\",\n abort=True,\n err=True,\n )\n\n session = ctx.obj[\"session\"]\n click.echo(\"Resetting OATH data...\")\n old_id = session.device_id\n session.reset()\n\n keys = ctx.obj[\"oath_keys\"]\n if old_id in keys:\n del keys[old_id]\n keys.write()\n logger.info(\"Deleted remembered access key\")\n\n click.echo(\"Success! All OATH accounts have been deleted from the YubiKey.\")",
"def reset_pki():\n with open(f'{pki_dir}/serial', 'w') as serial_file:\n serial_file.write('00000000')\n serial_file.close()\n os.remove(f'{pki_dir}/*')",
"def reset_keys(self):\n self.UP_KEY, self.DOWN_KEY, self.START_KEY, self.BACK_KEY = False, False, False, False",
"def reset_keys(self):\n self.UP_KEY, self.DOWN_KEY, self.START_KEY, self.BACK_KEY = False, False, False, False",
"def reboot_fpga(self):\n log.info(\"Booting FPGA from SPI prom\")\n self.set(\"FPGA_CTRL\", \"boot_fpga\", 1);",
"def set_secure_boot_state(self, task, state):\n return irmc_common.set_secure_boot_mode(task.node, state)",
"def set_secure_boot_mode(self, secure_boot_enable):\n sushy_system = self._get_sushy_system()\n try:\n sushy_system.secure_boot.enable_secure_boot(secure_boot_enable)\n except exception.InvalidInputError as e:\n msg = (self._('Invalid input. Error %(error)s')\n % {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to set secure '\n 'boot settings on the server. Error: %(error)s')\n % {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)",
"def shutdown(self):\n auth.debug(\"DICEKey shutdown called\")\n super().shutdown()\n AuthenticatorCryptoProvider.shutdown_providers()",
"def restore_config(self):\n self._clear_previous_windows_assigment()\n self._restart_i3_config()",
"def hotkeys_resetAll():\n _set = validate_hotkeySet(False)\n log.warning(\"All hotkeys on '{0}' set reset to maya defaults\".format(_set))\n mc.hotkey(fs = True )",
"def reset_options(self, keylist):\r\n return self.sendAndRecv(\"RESETCONF %s\\r\\n\"%(\" \".join(keylist)))",
"def set_secure_boot_mode(self, secure_boot_enable):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('SecureBootEnable',\n secure_boot_enable)\n else:\n msg = ('System is not in UEFI boot mode. \"SecureBoot\" related '\n 'resources cannot be changed.')\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def reset_user(self):\n\n if self.resin.auth.is_logged_in():\n self.wipe_application()\n self.resin.models.key.base_request.request(\n 'user__has__public_key', 'DELETE',\n endpoint=self.resin.settings.get('pine_endpoint'), login=True\n )",
"def resetDevice(self):\n reset_pkt = [START_BYTE_1, START_BYTE_2, RESET_MTYPE, 0x00, HEADER_SIZE + RESET_DATA_SIZE]\n reset_pkt.extend(RESET_KEY_LE)\n\n crc = crc8(reset_pkt)\n reset_pkt.append(crc)\n\n self.write(bytearray(reset_pkt))",
"def restart_salt():\n stop_salt()\n start_salt()",
"def reset(self,bootloader=False):\n self.send_packet('\\xff' if bootloader else '\\xfe')",
"def hard_reset(self) -> None:\n os.system('rm -fr \"$HOME/.daf/\"')",
"def svc_reset_system_mode(self) -> None:\n self._call_client_api(self._device.reset_mode)",
"def soft_reset():",
"def resetDeviceStates(self):",
"def __mode_reset(self):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tval.reset_restart()",
"def unconfigure_service_password_encryption(device):\n\n try:\n device.configure(\"no service password-encryption\")\n except SubCommandFailure:\n raise SubCommandFailure(\n \"Could not unconfigure service password encryption\"\n )",
"def reset(self):\n self.logger.debug(\"Resetting %s\", self.key)\n self.driver.reset(self.key)",
"def _reset(cls):\r\n cls._CONFIGURED = False\r\n cls._ENABLED = {}",
"def reset(self):\n\n ## Turn off controller to bring to a known state\n try:\n self.logger.info(\"Turning off sta3800 controller (sta3800_off).\")\n ccdsetup.sta3800_off()\n except Exception:\n self.logger.exception(\"Unable to turn off controller! State may be unknown.\")\n raise\n else:\n self.logger.info(\"Controller turned off successfully.\")\n\n ## Initialize controller\n try:\n self.logger.info(\"Turning on sta3800 controller (sta3800_setup).\")\n ccdsetup.sta3800_setup()\n except Exception:\n self.logger.exception(\"Unable to turn on sta3800 controller!\")\n raise\n else:\n self.logger.info(\"Controller turned on successfully.\")",
"def _DisableRootFsVerification(self):\n # 2 and 4 are the kernel partitions.\n for partition in [2, 4]:\n self.RunCmdOnDevice(['/usr/share/vboot/bin/make_dev_ssd.sh',\n '--partitions', str(partition),\n '--remove_rootfs_verification', '--force'])\n\n # Restart, wait a bit, and re-establish the SSH master connection.\n # We need to close the connection gracefully, then run the shutdown command\n # without using a master connection. port_forward=True bypasses the master\n # connection.\n self.CloseConnection()\n self.RunCmdOnDevice(['reboot'], port_forward=True)\n time.sleep(30)\n self.OpenConnection()",
"def _soft_reset(self):\n self._reset_specific_envs(self.episodes_done)\n self._update_other_info()",
"def reset(self):\n command = \"export STLINK_DEVICE=\" + self.stlink.port + \"; st-flash reset\"\n subprocess.run(command, shell=True)\n time.sleep(1)",
"def safe_boot_disabled(self, safe_boot_disabled):\n\n self._safe_boot_disabled = safe_boot_disabled"
] | [
"0.64598894",
"0.61968",
"0.61686015",
"0.6030497",
"0.6030497",
"0.58504564",
"0.5713005",
"0.5710499",
"0.5703248",
"0.56868505",
"0.5620701",
"0.5609751",
"0.5601407",
"0.5587413",
"0.5550478",
"0.55427927",
"0.55306214",
"0.55234766",
"0.55176026",
"0.5505577",
"0.5487669",
"0.54727244",
"0.547004",
"0.54634017",
"0.5460822",
"0.54337865",
"0.5423768",
"0.5415206",
"0.5410141",
"0.5405178"
] | 0.84218895 | 0 |
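reset_secure_boot_keys is the generic settings helper from the first row with a fixed payload behind a boot-mode guard. The guard-then-delegate shape, sketched against a hypothetical client object (both method names are placeholders):

def reset_keys(client):
    # Secure Boot resources are only writable in UEFI mode, so check
    # first, then PATCH the fixed reset payload.
    if client.get_current_boot_mode() != 'UEFI':
        raise RuntimeError('Secure Boot resources require UEFI boot mode')
    client.change_secure_boot_settings('ResetToDefaultKeys', True)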
Perform the requested power operation. | def _perform_power_op(self, oper):
power_settings = {"Action": "Reset",
"ResetType": oper}
systems_uri = "/rest/v1/Systems/1"
status, headers, response = self._rest_post(systems_uri, None,
power_settings)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def power():\n request_command(tv_command=TVCommand.power)",
"def power_on(self):\n raise NotImplementedError",
"def pow(self, power):\n daskD.wait(self.client.map(_call_pow, self.vecDask, power=power, pure=False))\n return self",
"def power_on(self):\n pass",
"def power(self, power):\n\n self._power = power",
"def poweron(self):\n raise NotImplementedError()",
"def power(self, value: int):\n self._power = value",
"def get_setPower(self):\n self.read(\":POW?\")",
"def perform(self, context):\r\n context.owner.spendPower(self.power)",
"def __pow__(self, exponent):\n return self.runtime.pow(self, exponent)",
"def power(a, b):\n pass",
"def poweroff(self):\n raise NotImplementedError()",
"def power(self, power: int, matrix_power: bool = False) -> QuantumCircuit:\n raise NotImplementedError",
"def powerIP(self,power):\n np.power(self.t, power, out=self.t)\n return self",
"async def power_on(self):\n ...",
"def __pow__(self,n):\r\n\t\t\r\n\t\t# take power\r\n\t\tp = self.power(n)\r\n\t\t\r\n\t\treturn p",
"def poweroff(self) -> None:\n pass",
"def Incrpower(self, increment):\n self.power += increment",
"def _call_pow(vecObj, power):\n res = vecObj.pow(power)\n return res",
"def power(a, b):\n \n return a**b",
"def power(self):\n return self._power",
"def power(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"power\")",
"def get_power():\n return float(cmd(\"pa?\"))",
"def power(x): \r\n return x(1)",
"def __pow__(self,*args):\r\n pass",
"def power(num,pwr):\n if pwr is 0:\n return 1\n\n if pwr < 0 :\n return \"not supported by this function.\"\n\n if num != 0 and pwr >= 0:\n return num * power(num,pwr-1)",
"def set_power(self, power):\n x = 0\n if power > 100:\n power = 100\n elif power < 0:\n power = 0\n if power != 0:\n while (self.__rpm < 100) and x < 3:\n time.sleep(1)\n x += 1\n if x > 3:\n print(\"Fan doesn't spinn!\")\n return\n self.__pow = power",
"def get_power(self):\r\n x = self.query('POW?')\r\n if x == None: return None\r\n return float(x)",
"def power(self) -> int:\n return self._power",
"def power(self) -> int:\n return self._power"
] | [
"0.71487105",
"0.7076449",
"0.70716",
"0.6828888",
"0.6828583",
"0.670036",
"0.6691989",
"0.66501945",
"0.6643156",
"0.6614282",
"0.6602157",
"0.6598427",
"0.6593769",
"0.6579953",
"0.6551106",
"0.6543305",
"0.6498859",
"0.6488113",
"0.6479012",
"0.646967",
"0.6468145",
"0.6437044",
"0.6413782",
"0.63949406",
"0.6377545",
"0.63713187",
"0.6358516",
"0.6351886",
"0.6347593",
"0.6347593"
] | 0.7263758 | 0 |
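The Reset action is a single POST to the system resource. A sketch with requests, adding a client-side check of the reset type (the allowed set shown here is an assumption; real firmware advertises the supported values in the resource itself):

import requests

RESET_TYPES = {'On', 'ForceOff', 'ForceRestart', 'Nmi', 'PushPowerButton'}

def perform_power_op(host, auth, oper):
    if oper not in RESET_TYPES:
        raise ValueError('unsupported reset type: %s' % oper)
    resp = requests.post('https://%s/rest/v1/Systems/1' % host,
                         json={'Action': 'Reset', 'ResetType': oper},
                         auth=auth, verify=False)
    if resp.status_code >= 300:  # same threshold the positive uses
        raise RuntimeError('power operation failed: %d' % resp.status_code)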
Simulates a physical press of the server power button. | def press_pwr_btn(self):
self._press_pwr_btn() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def power():\n request_command(tv_command=TVCommand.power)",
"def _press_pwr_btn(self, pushType=\"Press\"):\n power_settings = {\"Action\": \"PowerButton\",\n \"Target\": \"/Oem/Hp\",\n \"PushType\": pushType}\n\n systems_uri = \"/rest/v1/Systems/1\"\n\n status, headers, response = self._rest_post(systems_uri, None,\n power_settings)\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)",
"async def power_on(self):\n ...",
"def double_click_power(self):\n get_power_event_cmd = (\"getevent -pl 2>&1 | sed -n \"\n \"'/^add/{h}/KEY_POWER/{x;s/[^/]*//p}'\")\n input_event = self.adb.exec_adb_cmd(\n \"shell '{cmd}'\".format(cmd=get_power_event_cmd)).communicate()[0]\n\n self.android_device_driver.adb.exec_adb_cmd(\"shell '{cmd}'\".format(\n cmd=DOUBLE_CLICK_POWER_EVENT_TEMPLATE.format(input_event=input_event)))",
"def _PressLeftButton(self):\n self._kit.MousePressButtons({PeripheralKit.MOUSE_BUTTON_LEFT})\n time.sleep(self.send_delay)",
"def press(self, key: list, t):\n if not self.ser.alive:\n return\n k = '00'\n for v in key:\n k = hex(int(v, 16) ^ int(k, 16))\n if len(k) == 3:\n k = k.replace('0x', '0x0')\n if \"-\" in t:\n val = t.split(\"-\")\n delay = round(random.uniform(float(val[0]), float(val[1])), 4)\n else:\n delay = float(t)\n k = k.replace('0x', '')\n # close relay\n self.ser.write(k.encode('utf-8'), isHex=True)\n # How long do you need to press\n self.log.info('button press time={}'.format(delay))\n time.sleep(delay)\n # release relay\n self.ser.write('00'.encode('utf-8'), isHex=True)",
"def on_press(key):\n currentX, currentY = pyautogui.position()\n\n if key in LEFT_LEYS:\n pyautogui.move(-DEFAULT_MOVEMENT, 0)\n if key in DOWN_KEYS:\n pyautogui.move(0, DEFAULT_MOVEMENT)\n if key in UP_KEYS:\n pyautogui.move(0, -DEFAULT_MOVEMENT)\n if key in RIGHT_KEYS:\n pyautogui.move(DEFAULT_MOVEMENT, 0)\n\n if key in LEFTMOST_KEYS:\n pyautogui.moveTo(0, currentY)\n notify(\"Powermouse\", \"Moved to left of the screen\")\n if key in BOTTOM_KEYS:\n pyautogui.moveTo(currentX, screenHeight)\n notify(\"Powermouse\", \"Moved to bottom of screen\")\n if key in TOP_KEYS:\n pyautogui.moveTo(screenWidth, currentY)\n notify(\"Powermouse\", \"Moved to top of screen\")\n if key in RIGHTMOST_KEYS:\n pyautogui.moveTo(0, currentY)\n notify(\"Powermouse\", \"Moved to right of screen\")\n\n if key in CLICK_KEYS:\n pyautogui.click()\n notify(\"Powermouse\", f\"Clicked at position {pyautogui.position()}\")\n\n if key in QUIT_KEYS:\n notify(\"Powermouse\", \"Quitting\")\n exit()",
"def hold_pwr_btn(self):\n self._press_pwr_btn(pushType=\"PressAndHold\")",
"def _doPowerState(self, state=False):\n if state:\n self._cmdPowerOn()\n else:\n self._cmdPowerOff()",
"def power_on(self):\n pass",
"def m_press(self, button: MButton):\n pass",
"def turn_on(self):\n self._remote.power(1)",
"async def async_press(self) -> None:\n if self.entity_description.key == _RESTART_KEY:\n await self._device.async_reboot()\n else:\n await self._device.async_unpair_remotes()\n await self._device.async_config_remotes(RemoteConfig.OPEN)",
"def _PressRightButton(self):\n self._kit.MousePressButtons({PeripheralKit.MOUSE_BUTTON_RIGHT})\n time.sleep(self.send_delay)",
"def host_power_action(self, host, action):\n return action",
"def host_power_action(self, host, action):\n return action",
"def test_api_ucs_power(self):\n # first power off all servers\n self.set_all_server_power_state(\"off\")\n # verify power state is down\n self.check_all_server_power_state(\"down\")\n # now power on the servers\n self.set_all_server_power_state(\"on\")\n # verify power state is up\n self.check_all_server_power_state(\"up\")",
"def pressed(self):\n print(\"Pressed (Pin: {}): {} -- {}\".format(self.button_pin, datetime.now(), self))\n if self.led.is_lit:\n self.led.off()\n self.mute()\n else:\n self.led.on()\n self.unmute()",
"def _windows_power_control(self):\n\n os_power_command = 'shutdown /r /t 3' if self._power_event_type == 'restart' \\\n else 'shutdown /h /t 3'\n\n exit_code, out = self._staf_start_proc(os_power_command,\n self._sut.bespoke_root,\n self._command_timeout,\n location = self._sut.network_address)\n\n if exit_code != 0:\n raise CoreError('Power control event \"{0}\" failed: {1}'.format(self._name, out))",
"async def sendKeyPress(self, key):\n key = str(key)\n await self.director.sendPostRequest(\n \"/api/v1/items/{}/commands\".format(self.item_id),\n \"KEY_PRESS\",\n {\"KeyName\": key},\n )",
"async def async_turn_on(self):\n data_cmd = _command(COMMAND_POWER_ON)\n await self._async_send_command(data_cmd)",
"def on(cls, client_object):\n vm_mor = client_object.get_api()\n return cls._do_power_action(vm_mor.PowerOnVM_Task())",
"async def power(self, turn_on):\n\n op = DHumOp.ON if turn_on else DHumOp.OFF\n keys = self._get_cmd_keys(CMD_STATE_OPERATION)\n op_value = self.model_info.enum_value(keys[2], op.value)\n if self._should_poll:\n # different power command for ThinQ1 devices\n cmd = \"Start\" if turn_on else \"Stop\"\n await self.set(keys[0], keys[2], key=None, value=cmd)\n self._status.update_status(keys[2], op_value)\n return\n await self.set(keys[0], keys[1], key=keys[2], value=op_value)",
"def set_power(self, power: bool):\r\n if not self.backlight:\r\n return\r\n\r\n self.backlight.power = power",
"def LeftClick(self):\n self._PressLeftButton()\n self._ReleaseAllButtons()",
"async def power_off(self):\n ...",
"def handle_button_press(button_state, mqtt_client, message):\n if button_state:\n ev3.Sound.speak(message).wait()\n mqtt_client.send_message(\"button_pressed\", [message])",
"def power_up(self):\n t_end = time.time() + 3\n while time.time() < t_end:\n self.light_led(5)\n self.light_led(6)",
"def sleep(self):\r\n if not self.backlight:\r\n return\r\n\r\n self.backlight.power = False",
"async def on_buttonA_down(event, data):\n ArmDevice.storage.command[6] = gripper_open_speed"
] | [
"0.74329734",
"0.7021857",
"0.678499",
"0.6608517",
"0.65348643",
"0.64914054",
"0.6230932",
"0.62221473",
"0.62064654",
"0.6177791",
"0.61654186",
"0.61310524",
"0.60974616",
"0.608872",
"0.6059632",
"0.6059632",
"0.5930312",
"0.58865917",
"0.5870763",
"0.5838959",
"0.5828924",
"0.5789177",
"0.5772703",
"0.57638323",
"0.57527804",
"0.5716567",
"0.5698876",
"0.56980443",
"0.5694779",
"0.56930614"
] | 0.7256616 | 1 |
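press_pwr_btn and hold_pwr_btn share one POST and differ only in PushType, as the _press_pwr_btn negative shows. A combined sketch; the payload shape comes from that negative, while the transport details are assumed:

import requests

def push_power_button(host, auth, hold=False):
    payload = {'Action': 'PowerButton',
               'Target': '/Oem/Hp',
               'PushType': 'PressAndHold' if hold else 'Press'}
    resp = requests.post('https://%s/rest/v1/Systems/1' % host,
                         json=payload, auth=auth, verify=False)
    if resp.status_code >= 300:
        raise RuntimeError('power button request failed: %d' % resp.status_code)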
Request the HTTP boot URL from the system in UEFI boot mode. | def get_http_boot_url(self):
if(self._is_boot_mode_uefi() is True):
return self._get_bios_setting('UefiShellStartupUrl')
else:
msg = 'get_http_boot_url is not supported in the BIOS boot mode'
raise exception.IloCommandNotSupportedInBiosError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_http_boot_url(self, url):\n if(self._is_boot_mode_uefi() is True):\n self._change_bios_setting({'UefiShellStartupUrl': url})\n else:\n msg = 'set_http_boot_url is not supported in the BIOS boot mode'\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def get_http_boot_uri(self):\n try:\n sushy_system = self._get_sushy_system()\n http_boot_uri = sushy_system.http_boot_uri.httpbooturi\n except sushy.exceptions.SushyError as e:\n msg = (self._('Not able to find HTTP Boot URI. Error: '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n return http_boot_uri",
"def set_http_boot_uri(self, url):\n try:\n sushy_system = self._get_sushy_system()\n sushy_system.http_boot_uri.set_http_boot_uri(url)\n except sushy.exceptions.SushyError as e:\n msg = (self._('Unable to set HTTP Boot URI. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)",
"def is_http_boot_requested(node):\n http_boot_requested = (\n str(node.driver_info.get('enable_uefi_httpboot', 'false')).lower())\n return http_boot_requested == 'true'",
"def _get_bios_boot_resource(self, data):\n try:\n boot_uri = data['links']['Boot']['href']\n except KeyError:\n msg = ('Boot resource not found.')\n raise exception.IloCommandNotSupportedError(msg)\n\n status, headers, boot_settings = self._rest_get(boot_uri)\n\n if status != 200:\n msg = self._get_extended_error(boot_settings)\n raise exception.IloError(msg)\n\n return boot_settings",
"def elim_bootstrap_fetch(tree):\n\n boot = tree.find('.//target[@name=\"boot\"]')\n for child in boot.findall(\"./exec\"):\n boot.remove(child)\n echo = boot.find(\"./echo\")\n echo.attrib[\"message\"] = \"Not fetching bootstrap libraries in the Fedora build\"",
"def flashUboot(self):\n\t\tif self.settings.getKeyValue('flash.uboot?') == 'y':\n\t\t\tloadAddress = self.settings.getKeyValue('u-boot.flash.address')\n\t\t\tcmd = self.settings.getKeyValue('u-boot.load.command')\n\t\t\tcmd = cmd.replace('<u-boot>', 'u-boot.bin.12x.2430')\n\t\t\tself.socket.send(cmd, 5)\n\t\t\t#self.socket.send('protect off 1:0-1\\r', 2)\n\t\t\t#self.socket.send('erase 1:0-1\\r', 2)\n\t\t\t#self.socket.send('cp.b 80000000 %s 2ffff\\r' % loadAddress)\n\t\t\treturn None\n\t\t\t#cmd = cmd.replace('<u-bootloadadress>', self.u-bootloadaddress)",
"def boot(self):\n\t\tmesslen, received = self.socket.send('bootm\\r', 25)\t\t\n\t\treturn None",
"def set_bios_bootmode_uefi(ip, login_account, login_password, system_id):\n result = {}\n login_host = \"https://\" + ip\n try:\n # Connect using the BMC address, account name, and password\n # Create a REDFISH object\n REDFISH_OBJ = redfish.redfish_client(base_url=login_host, username=login_account, timeout=utils.g_timeout,\n password=login_password, default_prefix='/redfish/v1', cafile=utils.g_CAFILE)\n # Login into the server and create a session\n REDFISH_OBJ.login(auth=utils.g_AUTH)\n except:\n traceback.print_exc()\n result = {'ret': False, 'msg': \"Please check the username, password, IP is correct\"}\n return result\n\n # GET the ComputerSystem resource\n system = utils.get_system_url(\"/redfish/v1\", system_id, REDFISH_OBJ)\n if not system:\n result = {'ret': False, 'msg': \"This system id is not exist or system member is None\"}\n REDFISH_OBJ.logout()\n return result\n for i in range(len(system)):\n system_url = system[i]\n response_system_url = REDFISH_OBJ.get(system_url, None)\n if response_system_url.status != 200:\n error_message = utils.get_extended_error(response_system_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (system_url, response_system_url.status, error_message)}\n REDFISH_OBJ.logout()\n return result\n else:\n # Get the bios resource\n bios_url = response_system_url.dict['Bios']['@odata.id']\n response_bios_url = REDFISH_OBJ.get(bios_url, None)\n if response_bios_url.status != 200:\n error_message = utils.get_extended_error(response_bios_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (bios_url, response_bios_url.status, error_message)}\n REDFISH_OBJ.logout()\n return result\n else: # Get bios success\n # Seek boot mode from bios attributes\n attribute_bootmode = None\n attributes = response_bios_url.dict['Attributes']\n for attribute in attributes:\n if attribute == \"BootMode\" or attribute == \"SystemBootMode\":\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n for attribute in attributes:\n if \"SystemBootMode\" in attribute:\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n for attribute in attributes:\n if \"Boot\" in attribute and \"Mode\" in attribute:\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n result = {'ret': False, 'msg': \"Can not found BootMode attribute in response of url %s\" %(bios_url)}\n REDFISH_OBJ.logout()\n return result\n\n # Get boot mode setting guide from bios registry\n WarningText = None\n ValueName = None\n bios_registry_url = \"/redfish/v1/Registries/\" + response_bios_url.dict['AttributeRegistry']\n response_bios_registry_url = REDFISH_OBJ.get(bios_registry_url, None)\n if response_bios_registry_url.status == 200:\n locations = response_bios_registry_url.dict['Location']\n bios_regjson_url = None\n for location in locations:\n if 'en' in location['Language']:\n bios_regjson_url = location['Uri']\n break\n if bios_regjson_url:\n response_bios_regjson_url = REDFISH_OBJ.get(bios_regjson_url, None)\n if response_bios_regjson_url.status == 200:\n regattributes = response_bios_regjson_url.dict['RegistryEntries']['Attributes']\n for regattribute in regattributes:\n if regattribute['AttributeName'] == attribute_bootmode:\n if 'WarningText' in regattribute:\n WarningText = regattribute['WarningText']\n for value in regattribute['Value']:\n if 'legacy' in value['ValueName'].lower():\n continue\n if 'uefi' in value['ValueName'].lower():\n ValueName = 
value['ValueName']\n break\n ValueName = value['ValueName']\n break\n \n # Perform patch to set\n if ValueName == None:\n ValueName = \"UEFIMode\"\n pending_url = response_bios_url.dict['@Redfish.Settings']['SettingsObject']['@odata.id']\n parameter = {attribute_bootmode: ValueName}\n attribute = {\"Attributes\": parameter}\n headers = {\"If-Match\": '*'}\n response_pending_url = REDFISH_OBJ.patch(pending_url, body=attribute, headers=headers)\n if response_pending_url.status in [200,204]:\n if WarningText:\n result = {'ret': True, 'msg': 'set bios bootmode uefi successful. WarningText: %s'% (WarningText) }\n else:\n result = {'ret': True, 'msg': 'set bios bootmode uefi successful'}\n elif response_pending_url.status == 405:\n result = {'ret': False, 'msg': \"Resource not supported\"}\n else:\n error_message = utils.get_extended_error(response_pending_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (\n pending_url, response_pending_url.status, error_message)}\n\n # Logout of the current session\n try:\n REDFISH_OBJ.logout()\n except:\n pass\n return result",
"def get_bootvar(self):\n module = 'bootimage/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n bootdefault = response.json()['bootimage']['oper']['hd-default']\n print(self.device + ' The device is set to boot from: ' + bootdefault + ' in the future')\n return bootdefault",
"def bootloader() -> NoReturn:",
"def test_get_bios_boot_mode_by_moid(self):\n pass",
"def actionFromweb(self):\n print(\"Grabbing %x firmware.\" % self.dev_id)\n print(\"%s\" % firmware[self.dev_id])\n fn=\"/tmp/.goodfet.hex\"\n os.system(\"curl %s >%s\" % (firmware[self.dev_id],fn))\n\n fw=Memory(fn)\n #fw.loadIhex(open(fn,\"rb\"))\n\n sys.stderr.write(\"Program ...\\n\")\n sys.stderr.flush()\n self.programData(fw, self.ACTION_PROGRAM | self.ACTION_VERIFY)\n sys.stderr.write(\"%i bytes programmed.\\n\" % self.byteCtr)\n sys.stderr.flush()",
"def boot(self):\n\n pass",
"def get_boot_order(rfo, api=1, unit=1):\n\n url = f\"/redfish/v{api}/systems/{unit}/bios\"\n res = rfo.get(url)\n if res.status != 200:\n print(f\"Error: {res.status}: {res.read}\")\n return \"XXX\"\n booturl = res.dict['Oem']['Hpe']['Links']['Boot']['@odata.id']\n res = rfo.get(booturl)\n if res.status != 200:\n print(f\"HTTP Fail Status: {res.status} - {res.read}\")\n return \"XXX\"\n return res.dict['DefaultBootOrder']",
"def get_server_url():\n try:\n url = os.environ['API_HOST']\n # print('[ OK ] Server url loaded: ', url)\n except KeyError:\n url = 'http://localhost:3300/'\n print('[ WARNING ] API_HOST environment variable was not found. default server url was set at: ', url)\n\n return url",
"def test_update_bios_boot_mode(self):\n pass",
"def setUbootFlashAddress(self):\n\t\tself.ubootflashaddress = self.settings.getKeyValue('u-boot.flash.address')\n\t\treturn None",
"async def init_boot_time(self):\n await self.exec_cmd(self._parse_boottime_hostname,\n [\"show version|json\", \"show hostname\"], None,\n 'mixed')",
"async def get_device_boottime_hostname(self):\n\n if self.transport == 'https':\n cmdlist = [\"show version\", \"show hostname\"]\n else:\n cmdlist = [\"show version|json\", \"show hostname|json\"]\n await self.exec_cmd(self._parse_boottime_hostname, cmdlist, None)",
"def boot(self, boot_node_request):\n return self.client.call('POST',\n self.name + 'boot', payload=boot_node_request)",
"def get_secure_boot_mode(self):\n system = self._get_host_details()\n\n if ('links' not in system['Oem']['Hp'] or\n 'SecureBoot' not in system['Oem']['Hp']['links']):\n msg = ('\"SecureBoot\" resource or feature is not supported'\n ' on this system')\n raise exception.IloCommandNotSupportedError(msg)\n\n secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']\n\n # get the Secure Boot object\n status, headers, secure_boot_settings = self._rest_get(secure_boot_uri)\n\n if status >= 300:\n msg = self._get_extended_error(secure_boot_settings)\n raise exception.IloError(msg)\n\n return secure_boot_settings['SecureBootCurrentState']",
"def start(self, module=None, app=None, delay=None):\n with (app or flask.current_app).test_request_context():\n path = self.url(delay=delay)\n\n url = 'https://{module}-dot-{hostname}{path}'.format(\n module=module or self.module_name,\n hostname=app_identity.get_default_version_hostname(),\n path=path)\n\n urlfetch.fetch(url)",
"def boot():\r\n print \"\"\"\r\n ###### ## ## ### ## ## ## ## ######## ########\r\n ## ## ## ## ## ## ### ## ## ## ## ## ##\r\n ## #### ## ## #### ## ## ## ## ## ##\r\n ## ## ## ## ## ## ## ## ## ######## ######\r\n ## ## ######### ## #### ## ## ## ## ##\r\n ## ## ## ## ## ## ### ## ## ## ## ##\r\n ###### ## ## ## ## ## ####### ## ## ########\r\n\r\n Version %s-%s\r\n\r\n Multi Purpose Artificial Inelegance Program\r\n Copyright (c) Alexandre Gauthier 2010-2011\r\n All Rights Reserved\r\n \"\"\" % ( constants.VERSION, constants.TAGNAME )\r\n\r\n # Initialize log\r\n # TODO: The values should be read from config file.\r\n log.init_log('cyanure.log', 'DEBUG')\r\n\r\n logger.info(\"Cyanure system init: Version %s (%s)\" % (\r\n constants.VERSION, constants.TAGNAME ))",
"def main():\n kernel_params = _parse_kernel_cmdline()\n api_url = kernel_params.get('ipa-api-url')\n if api_url is None:\n _process_error('Mandatory kernel parameter \"ipa-api-url\" is missing.')\n\n boot_mac = kernel_params.get('BOOTIF')\n if boot_mac is None:\n _process_error('Cannot define boot interface, \"BOOTIF\" kernel '\n 'parameter is missing.')\n\n # There is a difference in syntax in BOOTIF variable between pxe and ipxe\n # boot with Ironic.\n # For pxe boot the the leading `01-' denotes the device type (Ethernet)\n # and is not a part of the MAC address\n if boot_mac.startswith('01-'):\n boot_mac = boot_mac[3:].replace('-', ':')\n\n # FIXME(pas-ha) discover all MACs\n node = lookup(api_url, [boot_mac])\n uuid = node['node']['uuid']\n timeout = node['config']['heartbeat_timeout']\n\n heartbeat_url = '{api_url}/v1/heartbeat/{uuid}'.format(api_url=api_url,\n uuid=uuid)\n for n in range(_GET_ADDR_MAX_ITERATION):\n boot_ip = _get_interface_ip(boot_mac)\n if boot_ip is not None:\n break\n time.sleep(_RETRY_INTERVAL)\n else:\n _process_error('Cannot find IP address of boot interface.')\n\n heartbeat(heartbeat_url, boot_ip, timeout)",
"def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()",
"async def init_boot_time(self):\n await self.exec_cmd(self._parse_boottime_hostname,\n [\"show version\"], None)",
"def base_url(self):\n return \"https://api.byte-stack.net\" if self.use_sandbox \\\n else \"https://api.ovo.id\"",
"def start(self):\n self.get(self.url)",
"def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode"
] | [
"0.77584946",
"0.7341055",
"0.71151394",
"0.65061367",
"0.5847443",
"0.58197343",
"0.5644982",
"0.5516919",
"0.54772717",
"0.54441345",
"0.54027355",
"0.53707486",
"0.53131366",
"0.52545315",
"0.5227558",
"0.5223011",
"0.5216488",
"0.5198572",
"0.51643014",
"0.5142962",
"0.51252884",
"0.5124703",
"0.5098322",
"0.50982964",
"0.5086413",
"0.5071438",
"0.50681615",
"0.50618476",
"0.50538963",
"0.50493443"
] | 0.81860113 | 0 |
Set the UefiShellStartupUrl of the system in UEFI boot mode. | def set_http_boot_url(self, url):
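        # The UefiShellStartupUrl BIOS setting is only available in UEFI boot mode.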
        if self._is_boot_mode_uefi():
self._change_bios_setting({'UefiShellStartupUrl': url})
else:
msg = 'set_http_boot_url is not supported in the BIOS boot mode'
raise exception.IloCommandNotSupportedInBiosError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_http_boot_uri(self, url):\n try:\n sushy_system = self._get_sushy_system()\n sushy_system.http_boot_uri.set_http_boot_uri(url)\n except sushy.exceptions.SushyError as e:\n msg = (self._('Unable to set HTTP Boot URI. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)",
"def get_http_boot_url(self):\n if(self._is_boot_mode_uefi() is True):\n return self._get_bios_setting('UefiShellStartupUrl')\n else:\n msg = 'get_http_boot_url is not supported in the BIOS boot mode'\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def get_http_boot_uri(self):\n try:\n sushy_system = self._get_sushy_system()\n http_boot_uri = sushy_system.http_boot_uri.httpbooturi\n except sushy.exceptions.SushyError as e:\n msg = (self._('Not able to find HTTP Boot URI. Error: '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n return http_boot_uri",
"def go_to_setup_home(self):\n url = self.cumulusci.org.lightning_base_url\n self.selenium.go_to(url + \"/lightning/setup/SetupOneHome/home\")\n self.wait_until_loading_is_complete()",
"def setUbootFlashAddress(self):\n\t\tself.ubootflashaddress = self.settings.getKeyValue('u-boot.flash.address')\n\t\treturn None",
"def setup(self, url, browser_config):\n\n # navigate to the front page\n browser.open_url(url)",
"def pibooth_startup(cfg, app):",
"def _set_url(self): \n self.url = self.geturl()",
"def set_url(self, url):\n super(Cabling, self).set_url(url)",
"def SetBootloaderEnv(script, name, val):\n script.AppendExtra('set_bootloader_env(\"%s\", \"%s\");' % (name, val))",
"def flashUboot(self):\n\t\tif self.settings.getKeyValue('flash.uboot?') == 'y':\n\t\t\tloadAddress = self.settings.getKeyValue('u-boot.flash.address')\n\t\t\tcmd = self.settings.getKeyValue('u-boot.load.command')\n\t\t\tcmd = cmd.replace('<u-boot>', 'u-boot.bin.12x.2430')\n\t\t\tself.socket.send(cmd, 5)\n\t\t\t#self.socket.send('protect off 1:0-1\\r', 2)\n\t\t\t#self.socket.send('erase 1:0-1\\r', 2)\n\t\t\t#self.socket.send('cp.b 80000000 %s 2ffff\\r' % loadAddress)\n\t\t\treturn None\n\t\t\t#cmd = cmd.replace('<u-bootloadadress>', self.u-bootloadaddress)",
"def goto_environment_url(self):\n try:\n self._browser.get(self._environment.url)\n except Exception as e:\n self.logger.error(\"Error going to environment '\" + self._environment.url + \"' : \" + str(e))\n raise",
"def activator_ui():\n local('activator ui -Dhttp.address=10.0.2.15')",
"def set_url(self, url):\n self.url = url",
"async def snekurl(self, ctx: commands.Context, url=None):\r\n\r\n if not url:\r\n current_url = await self.conf.snekbox_url()\r\n await ctx.send_help()\r\n return await ctx.send(\"`Current snekbox URL: {}`\".format(current_url))\r\n\r\n async with ctx.typing():\r\n if await self._test_snekurl(url):\r\n await self.conf.snekbox_url.set(url)\r\n return await ctx.send(\":white_check_mark: It's working! New url set.\")\r\n\r\n await ctx.send(\":x: URL doesn't seem to work.\")",
"def set_url(self, url):\n self.data['url'] = url",
"def setBootargs(self):\n\t\tif self.testType == 'auto' or self.testType == 'manual':\n\t\t\tself.bootargs = self.settings.getKeyValue('nfs.fs')\n\t\t\tself.bootargs = self.bootargs.replace('<nfsroot>', self.nfsroot)\n\t\t\tself.bootargs = self.bootargs.replace('<ipaddr>', self.ipaddr)\n\t\t\tself.socket.send('setenv bootargs ' + self.bootargs+'\\r', 1)\n\t\telse:\n\t\t\tself.bootargs = self.settings.getKeyValue('ramdisk.fs')\n\t\t\tself.bootargs = self.bootargs.replace('<ipaddr>', self.ipaddr)\n\t\t\tself.socket.send('setenv bootargs ' + self.bootargs+'\\r', 1)\t\t\n\t\treturn None",
"def set_url(self, url):\n if url is not None:\n self.url = url",
"def set_url(self, url):\n self.url = url",
"async def set_event_url(self, event_url: Optional[str]) -> None:\n if not event_url:\n event_url = self._server.url\n url = quote(str(event_url), safe=\"\")\n _LOGGER.info(\"Setting event update URL to %s\", url)\n await self._api_request(f\"postURL/{url}\")",
"def set_lxd_init_auto(self):\n delay = 2\n for attempt in range(5):\n out = utils.run_script(\"conjure-up.lxd init --auto\")\n if out.returncode == 0:\n return\n time.sleep(delay)\n raise Exception(\n \"Problem running lxd init: {}\".format(out.stderr.decode()))",
"def load_path_url():\n web.ctx.path_url = web.ctx.home + web.ctx.path",
"def web_shell(self, web_shell):\n\n self._web_shell = web_shell",
"def setUrl( self, url ):\n self._urlEdit.setText(str(url))",
"def test_home(self):\n self.selenium.get('{}/'.format(self.live_server_url))",
"def step_impl(context, url):\n context.base_url = url",
"async def test_setup(hass: HomeAssistant, ufp: MockUFPFixture) -> None:\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await hass.async_block_till_done()\n\n assert ufp.entry.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert ufp.entry.unique_id == ufp.api.bootstrap.nvr.mac",
"def setKernelLoadAddress(self):\n\t\tself.kernelloadaddress = self.settings.getKeyValue('kernel.load.address')\n\t\treturn None",
"def do_startup(self):\n logger.debug('::startup')\n Gio.Application.do_startup(self)\n action = Gio.SimpleAction.new('quit', None)\n action.connect('activate', self.quit)\n self.add_action(action)\n # Initialize the current profiles, but do not auto load\n try:\n self.load_profile(self._settings.get_string('current-profile'), False, False)\n if self._settings.get_string('lockscreen-profile') != \"\":\n self.load_profile(self._settings.get_string('lockscreen-profile'), True, False)\n except (WallpaperNotFoundError, NotFoundError) as e:\n # If we failed to load the profile, its bad\n logger.error('failed to load profiles on startup: %s', e.message)\n # Connect the settings signals\n self._settings_handlers.append(self._settings.connect(\n 'changed::rotation',\n lambda s, k: self._toggle_timer(self._settings.get_string('rotation'))\n ))\n self._settings_handlers.append(self._settings.connect(\n 'changed::interval',\n lambda s, k: self._toggle_timer(self._settings.get_string('rotation'))\n ))\n self._settings_handlers.append(self._settings.connect('changed::current-profile', self._callback_desktop))\n self._settings_handlers.append(self._settings.connect('changed::lockscreen-profile', self._callback_lockscreen))\n self._settings_handlers.append(self._settings.connect('changed::update-lockscreen', self._callback_lockscreen))",
"def set_normal_environment(self):\n if 'RUSTUP_DIST_SERVER' in os.environ:\n self._download_url = os.environ['RUSTUP_DIST_SERVER']\n else:\n self._download_url = 'https://static.rust-lang.org'"
] | [
"0.72488326",
"0.7194045",
"0.5525388",
"0.55191153",
"0.5504752",
"0.5186038",
"0.51711583",
"0.5048918",
"0.50152254",
"0.49929443",
"0.49760246",
"0.49735522",
"0.49065635",
"0.48945484",
"0.48868546",
"0.48841438",
"0.483911",
"0.48351452",
"0.4814122",
"0.48077625",
"0.4805337",
"0.47425383",
"0.47400776",
"0.47386676",
"0.4734691",
"0.4729684",
"0.47245958",
"0.47183496",
"0.4716726",
"0.4693407"
] | 0.8233862 | 0 |
Set the iSCSI boot details of the system in UEFI boot mode. The iSCSI initiator is identified by the MAC address provided, and the initiator system is configured with target details such as IQN, LUN, IP, and port. | def set_iscsi_boot_info(self, mac, target_name, lun, ip_address,
port='3260', auth_method=None, username=None,
password=None):
        if self._is_boot_mode_uefi():
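            # Build the iSCSI target settings to apply to the NIC identified by the MAC.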
iscsi_info = {}
iscsi_info['iSCSITargetName'] = target_name
iscsi_info['iSCSIBootLUN'] = lun
iscsi_info['iSCSITargetIpAddress'] = ip_address
iscsi_info['iSCSITargetTcpPort'] = int(port)
iscsi_info['iSCSITargetInfoViaDHCP'] = False
iscsi_info['iSCSIBootEnable'] = 'Enabled'
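            # CHAP credentials are only set when CHAP authentication is requested.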
            if auth_method == 'CHAP':
iscsi_info['iSCSIAuthenticationMethod'] = 'Chap'
iscsi_info['iSCSIChapUsername'] = username
iscsi_info['iSCSIChapSecret'] = password
self._change_iscsi_settings(mac.upper(), iscsi_info)
else:
            msg = 'iSCSI boot is not supported in the BIOS boot mode'
raise exception.IloCommandNotSupportedInBiosError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unset_iscsi_boot_info(self, mac):\n if(self._is_boot_mode_uefi() is True):\n iscsi_info = {'iSCSIBootEnable': 'Disabled'}\n self._change_iscsi_settings(mac.upper(), iscsi_info)\n else:\n msg = 'iscsi boot is not supported in the BIOS boot mode'\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def set_bios_bootmode_uefi(ip, login_account, login_password, system_id):\n result = {}\n login_host = \"https://\" + ip\n try:\n # Connect using the BMC address, account name, and password\n # Create a REDFISH object\n REDFISH_OBJ = redfish.redfish_client(base_url=login_host, username=login_account, timeout=utils.g_timeout,\n password=login_password, default_prefix='/redfish/v1', cafile=utils.g_CAFILE)\n # Login into the server and create a session\n REDFISH_OBJ.login(auth=utils.g_AUTH)\n except:\n traceback.print_exc()\n result = {'ret': False, 'msg': \"Please check the username, password, IP is correct\"}\n return result\n\n # GET the ComputerSystem resource\n system = utils.get_system_url(\"/redfish/v1\", system_id, REDFISH_OBJ)\n if not system:\n result = {'ret': False, 'msg': \"This system id is not exist or system member is None\"}\n REDFISH_OBJ.logout()\n return result\n for i in range(len(system)):\n system_url = system[i]\n response_system_url = REDFISH_OBJ.get(system_url, None)\n if response_system_url.status != 200:\n error_message = utils.get_extended_error(response_system_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (system_url, response_system_url.status, error_message)}\n REDFISH_OBJ.logout()\n return result\n else:\n # Get the bios resource\n bios_url = response_system_url.dict['Bios']['@odata.id']\n response_bios_url = REDFISH_OBJ.get(bios_url, None)\n if response_bios_url.status != 200:\n error_message = utils.get_extended_error(response_bios_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (bios_url, response_bios_url.status, error_message)}\n REDFISH_OBJ.logout()\n return result\n else: # Get bios success\n # Seek boot mode from bios attributes\n attribute_bootmode = None\n attributes = response_bios_url.dict['Attributes']\n for attribute in attributes:\n if attribute == \"BootMode\" or attribute == \"SystemBootMode\":\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n for attribute in attributes:\n if \"SystemBootMode\" in attribute:\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n for attribute in attributes:\n if \"Boot\" in attribute and \"Mode\" in attribute:\n attribute_bootmode = attribute\n break\n if attribute_bootmode == None:\n result = {'ret': False, 'msg': \"Can not found BootMode attribute in response of url %s\" %(bios_url)}\n REDFISH_OBJ.logout()\n return result\n\n # Get boot mode setting guide from bios registry\n WarningText = None\n ValueName = None\n bios_registry_url = \"/redfish/v1/Registries/\" + response_bios_url.dict['AttributeRegistry']\n response_bios_registry_url = REDFISH_OBJ.get(bios_registry_url, None)\n if response_bios_registry_url.status == 200:\n locations = response_bios_registry_url.dict['Location']\n bios_regjson_url = None\n for location in locations:\n if 'en' in location['Language']:\n bios_regjson_url = location['Uri']\n break\n if bios_regjson_url:\n response_bios_regjson_url = REDFISH_OBJ.get(bios_regjson_url, None)\n if response_bios_regjson_url.status == 200:\n regattributes = response_bios_regjson_url.dict['RegistryEntries']['Attributes']\n for regattribute in regattributes:\n if regattribute['AttributeName'] == attribute_bootmode:\n if 'WarningText' in regattribute:\n WarningText = regattribute['WarningText']\n for value in regattribute['Value']:\n if 'legacy' in value['ValueName'].lower():\n continue\n if 'uefi' in value['ValueName'].lower():\n ValueName = 
value['ValueName']\n break\n ValueName = value['ValueName']\n break\n \n # Perform patch to set\n if ValueName == None:\n ValueName = \"UEFIMode\"\n pending_url = response_bios_url.dict['@Redfish.Settings']['SettingsObject']['@odata.id']\n parameter = {attribute_bootmode: ValueName}\n attribute = {\"Attributes\": parameter}\n headers = {\"If-Match\": '*'}\n response_pending_url = REDFISH_OBJ.patch(pending_url, body=attribute, headers=headers)\n if response_pending_url.status in [200,204]:\n if WarningText:\n result = {'ret': True, 'msg': 'set bios bootmode uefi successful. WarningText: %s'% (WarningText) }\n else:\n result = {'ret': True, 'msg': 'set bios bootmode uefi successful'}\n elif response_pending_url.status == 405:\n result = {'ret': False, 'msg': \"Resource not supported\"}\n else:\n error_message = utils.get_extended_error(response_pending_url)\n result = {'ret': False, 'msg': \"Url '%s' response Error code %s \\nerror_message: %s\" % (\n pending_url, response_pending_url.status, error_message)}\n\n # Logout of the current session\n try:\n REDFISH_OBJ.logout()\n except:\n pass\n return result",
"def set_boot_device(self, task, device, persistent=False):\n if (getattr(task.node, 'power_interface') == 'ipmitool'\n or task.node.driver_internal_info.get('irmc_ipmi_succeed')):\n if device not in self.get_supported_boot_devices(task):\n raise exception.InvalidParameterValue(_(\n \"Invalid boot device %s specified.\") % device)\n\n uefi_mode = (\n boot_mode_utils.get_boot_mode(task.node) == 'uefi')\n\n # disable 60 secs timer\n timeout_disable = \"0x00 0x08 0x03 0x08\"\n ipmitool.send_raw(task, timeout_disable)\n\n # note(naohirot):\n # Set System Boot Options : ipmi cmd '0x08', bootparam '0x05'\n #\n # $ ipmitool raw 0x00 0x08 0x05 data1 data2 0x00 0x00 0x00\n #\n # data1 : '0xe0' persistent + uefi\n # '0xc0' persistent + bios\n # '0xa0' next only + uefi\n # '0x80' next only + bios\n # data2 : boot device defined in the dict _BOOTPARAM5_DATA2\n\n bootparam5 = '0x00 0x08 0x05 %s %s 0x00 0x00 0x00'\n if persistent:\n data1 = '0xe0' if uefi_mode else '0xc0'\n else:\n data1 = '0xa0' if uefi_mode else '0x80'\n data2 = _BOOTPARAM5_DATA2[device]\n\n cmd8 = bootparam5 % (data1, data2)\n ipmitool.send_raw(task, cmd8)\n else:\n if device not in self.get_supported_boot_devices(task):\n raise exception.InvalidParameterValue(_(\n \"Invalid boot device %s specified. \"\n \"Current iRMC firmware condition doesn't support IPMI \"\n \"but Redfish.\") % device)\n super(ipmitool.IPMIManagement, self).set_boot_device(\n task, device, persistent)",
"def test_update_bios_boot_mode(self):\n pass",
"def setMAC( self, intf, mac ):\n result = self.cmd( 'ifconfig', intf, 'down' )\n result += self.cmd( 'ifconfig', intf, 'hw', 'ether', mac )\n result += self.cmd( 'ifconfig', intf, 'up' )\n return result",
"def set_boot_options(self, image_name, **vendor_specifics):\n current_boot = self.show(\"show running-config | inc ^boot system \")\n file_system = vendor_specifics.get(\"file_system\")\n if file_system is None:\n file_system = self._get_file_system()\n\n file_system_files = self.show(f\"dir {file_system}\")\n if re.search(image_name, file_system_files) is None:\n log.error(\"Host %s: File not found error for image %s.\", self.host, image_name)\n raise NTCFileNotFoundError(\n # TODO: Update to use hostname\n hostname=self.host,\n file=image_name,\n directory=file_system,\n )\n\n current_images = current_boot.splitlines()\n commands_to_exec = [f\"no {image}\" for image in current_images]\n commands_to_exec.append(f\"boot system {file_system}/{image_name}\")\n self.config(commands_to_exec)\n\n self.save()\n if self.boot_options[\"sys\"] != image_name:\n log.error(\"Host %s: Setting boot command did not yield expected results\", self.host)\n raise CommandError(\n command=f\"boot system {file_system}/{image_name}\",\n message=\"Setting boot command did not yield expected results\",\n )\n\n log.info(\"Host %s: boot options have been set to %s\", self.host, image_name)",
"def iscsi_target(self, iscsi_target):\n\n self._iscsi_target = iscsi_target",
"def test_patch_bios_boot_mode(self):\n pass",
"def set_os_mtu(self, iface=None, mtu=None):\n pytest.skip(\"Method is not supported by Iperf TG\")",
"def _update_persistent_boot(self, device_type=[], persistent=False,\n mac=None):\n tenure = 'Once'\n new_device = device_type[0]\n # If it is a standard device, we need to convert in RIS convention\n if device_type[0].upper() in DEVICE_COMMON_TO_RIS:\n new_device = DEVICE_COMMON_TO_RIS[device_type[0].upper()]\n\n if persistent:\n tenure = 'Continuous'\n\n systems_uri = \"/rest/v1/Systems/1\"\n # Need to set this option first if device is 'UefiTarget'\n if new_device is 'UefiTarget':\n if not mac:\n msg = ('Mac is needed for iscsi uefi boot')\n raise exception.IloInvalidInputError(msg)\n\n headers, bios_uri, bios_settings = self._check_bios_resource()\n # Get the Boot resource and Mappings resource.\n boot_settings = self._get_bios_boot_resource(bios_settings)\n StructuredBootString = None\n\n for boot_setting in boot_settings['BootSources']:\n if(mac.upper() in boot_setting['UEFIDevicePath'] and\n 'iSCSI' in boot_setting['UEFIDevicePath']):\n StructuredBootString = boot_setting['StructuredBootString']\n break\n if not StructuredBootString:\n msg = ('MAC provided is Invalid \"%s\"' % mac)\n raise exception.IloInvalidInputError(msg)\n\n new_boot_settings = {}\n new_boot_settings['Boot'] = {'UefiTargetBootSourceOverride':\n StructuredBootString}\n status, headers, response = self._rest_patch(systems_uri, None,\n new_boot_settings)\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)\n\n new_boot_settings = {}\n new_boot_settings['Boot'] = {'BootSourceOverrideEnabled': tenure,\n 'BootSourceOverrideTarget': new_device}\n status, headers, response = self._rest_patch(systems_uri, None,\n new_boot_settings)\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)",
"def login_iscsi_target(self, portal_config, target_config):\n ip = portal_config.get('ip')\n port = portal_config.get('port')\n iqn = target_config.get('iqn')\n if ip and port and iqn:\n command = 'iscsiadm -m node -l -T %s -p %s:%d' % (iqn, ip, port)\n self.cmd(command)",
"def setBootargs(self):\n\t\tif self.testType == 'auto' or self.testType == 'manual':\n\t\t\tself.bootargs = self.settings.getKeyValue('nfs.fs')\n\t\t\tself.bootargs = self.bootargs.replace('<nfsroot>', self.nfsroot)\n\t\t\tself.bootargs = self.bootargs.replace('<ipaddr>', self.ipaddr)\n\t\t\tself.socket.send('setenv bootargs ' + self.bootargs+'\\r', 1)\n\t\telse:\n\t\t\tself.bootargs = self.settings.getKeyValue('ramdisk.fs')\n\t\t\tself.bootargs = self.bootargs.replace('<ipaddr>', self.ipaddr)\n\t\t\tself.socket.send('setenv bootargs ' + self.bootargs+'\\r', 1)\t\t\n\t\treturn None",
"def configure(self):\n self.node.get_logger().info('Configuring device...')\n try:\n data = self.con.receive(registers.BNO055_CHIP_ID_ADDR, 1)\n if data[0] != registers.BNO055_ID:\n raise IOError('Device ID=%s is incorrect' % data)\n # print(\"device sent \", binascii.hexlify(data))\n except Exception as e: # noqa: B902\n # This is the first communication - exit if it does not work\n self.node.get_logger().error('Communication error: %s' % e)\n self.node.get_logger().error('Shutting down ROS node...')\n sys.exit(1)\n\n # IMU connected => apply IMU Configuration:\n if not (self.con.transmit(registers.BNO055_OPR_MODE_ADDR, 1, bytes([registers.OPERATION_MODE_CONFIG]))):\n self.node.get_logger().warn('Unable to set IMU into config mode.')\n\n if not (self.con.transmit(registers.BNO055_PWR_MODE_ADDR, 1, bytes([registers.POWER_MODE_NORMAL]))):\n self.node.get_logger().warn('Unable to set IMU normal power mode.')\n\n if not (self.con.transmit(registers.BNO055_PAGE_ID_ADDR, 1, bytes([0x00]))):\n self.node.get_logger().warn('Unable to set IMU register page 0.')\n\n if not (self.con.transmit(registers.BNO055_SYS_TRIGGER_ADDR, 1, bytes([0x00]))):\n self.node.get_logger().warn('Unable to start IMU.')\n\n if not (self.con.transmit(registers.BNO055_UNIT_SEL_ADDR, 1, bytes([0x83]))):\n self.node.get_logger().warn('Unable to set IMU units.')\n\n # The sensor placement configuration (Axis remapping) defines the\n # position and orientation of the sensor mount.\n # See also Bosch BNO055 datasheet section Axis Remap\n mount_positions = {\n 'P0': bytes(b'\\x21\\x04'),\n 'P1': bytes(b'\\x24\\x00'),\n 'P2': bytes(b'\\x24\\x06'),\n 'P3': bytes(b'\\x21\\x02'),\n 'P4': bytes(b'\\x24\\x03'),\n 'P5': bytes(b'\\x21\\x02'),\n 'P6': bytes(b'\\x21\\x07'),\n 'P7': bytes(b'\\x24\\x05')\n }\n if not (self.con.transmit(registers.BNO055_AXIS_MAP_CONFIG_ADDR, 2,\n mount_positions[self.param.placement_axis_remap.value])):\n self.node.get_logger().warn('Unable to set sensor placement configuration.')\n\n # Show the current sensor offsets\n self.node.get_logger().info('Current sensor offsets:')\n self.print_calib_data()\n if self.param.set_offsets.value:\n configured_offsets = \\\n self.set_calib_offsets(\n self.param.offset_acc,\n self.param.offset_mag,\n self.param.offset_gyr,\n self.param.radius_mag,\n self.param.radius_acc)\n if configured_offsets:\n self.node.get_logger().info('Successfully configured sensor offsets to:')\n self.print_calib_data()\n else:\n self.node.get_logger().warn('setting offsets failed')\n\n\n # Set Device mode\n device_mode = self.param.operation_mode.value\n self.node.get_logger().info(f\"Setting device_mode to {device_mode}\")\n\n if not (self.con.transmit(registers.BNO055_OPR_MODE_ADDR, 1, bytes([device_mode]))):\n self.node.get_logger().warn('Unable to set IMU operation mode into operation mode.')\n\n self.node.get_logger().info('Bosch BNO055 IMU configuration complete.')",
"def setUp(self):\n super().setUp()\n for intf in self.send_ifs:\n self.vapi.ip_reassembly_enable_disable(\n sw_if_index=intf.sw_if_index, enable_ip6=True\n )\n self.vapi.ip_reassembly_set(\n timeout_ms=0,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n expire_walk_interval_ms=10,\n is_ip6=1,\n )\n self.virtual_sleep(0.25)\n self.vapi.ip_reassembly_set(\n timeout_ms=1000000,\n max_reassemblies=1000,\n max_reassembly_length=1000,\n expire_walk_interval_ms=1000,\n is_ip6=1,\n )",
"def gather_system_versions(self):\n # Get Mac model ID\n self.hw_version = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"model\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n\n if \"imacpro\" in self.hw_version.lower():\n # iMac Pro stores it's EFI data different due it's new architecture\n # so grab the EFI & SMC ROM versions appropriately\n raw_efi_list = []\n raw_rom_info = str(\n IORegistryEntryCreateCFProperty(\n IORegistryEntryFromPath(\n 0,\n \"IODeviceTree:/rom\"),\n \"apple-rom-info\",\n None,\n 0))\n for data in raw_rom_info.split(\"\\n\"):\n if data.strip().startswith(\"BIOS ID\"):\n raw_efi_list = data.split(\":\")[1].strip().split(\".\")\n break\n else:\n self.message(\n \"[-] Could not find raw EFI data to determine EFI versions. Exiting....\")\n return False\n\n self.efi_version = \"%s.%s.%s\" % (\n raw_efi_list[0], raw_efi_list[2], raw_efi_list[3])\n # Can't currently find the SMC version like this on imac pros ....\n # self.smc_version = str(IORegistryEntryCreateCFProperty(IOServiceGetMatchingService(0, IOServiceMatching(\"AppleSMC\")), \"smc-version\", None, 0))\n self.smc_version = \"\"\n else:\n # EFI & SMC ROM versions\n self.smc_version = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"AppleSMC\")),\n \"smc-version\",\n None,\n 0))\n raw_efi = str(\n IORegistryEntryCreateCFProperty(\n IORegistryEntryFromPath(\n 0,\n \"IODeviceTree:/rom\"),\n \"version\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\").split(\".\")\n self.efi_version = \"%s.%s.%s\" % (\n raw_efi[0], raw_efi[2], raw_efi[3])\n\n # Set the salt to be the MAC address of the system, using the MAC as a salt in this manner\n # helps ensure that the hashed sysuuid is pseudonymous. We don't want to know the sysuuid's\n # value, but we do want it to be unique however. The Salt value is\n # never submitted to the API\n salt = hex(getnode())\n sys_uuid = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"IOPlatformUUID\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n self.h_sys_uuid = hashlib.sha256(salt + sys_uuid).hexdigest()\n\n # Get the Board-ID, this is how EFI files are matched to running\n # hardware - Nastee\n self.board_id = str(\n IORegistryEntryCreateCFProperty(\n IOServiceGetMatchingService(\n 0,\n IOServiceMatching(\"IOPlatformExpertDevice\")),\n \"board-id\",\n None,\n 0)).replace(\n \"\\x00\",\n \"\")\n\n # Get OS version\n self.os_version = commands.getoutput(\"sw_vers -productVersion\")\n\n # Get build number\n self.build_num = commands.getoutput(\"sw_vers -buildVersion\")\n\n # Carve out the major version as we use this a bunch\n # self.os_maj_ver = \".\".join(self.os_version.split(\".\")[:2])\n\n # Add gathered info to the dictionary to query the API with\n self.endpoints_to_check[\"127.0.0.1\"] = {\n \"hashed_uuid\": self.h_sys_uuid,\n \"hw_ver\": self.hw_version,\n \"rom_ver\": self.efi_version,\n \"smc_ver\": self.smc_version,\n \"board_id\": self.board_id,\n \"os_ver\": self.os_version,\n \"build_num\": self.build_num}\n\n return True",
"def set_bios_settings(self, data=None):\n\n if not data:\n raise exception.SDFlexError(\"Could not apply settings with\"\n \" empty data\")\n sushy_system = self._get_sushy_system()\n\n try:\n for key in data.keys():\n sushy_system.bios.set_attribute(key, data[key])\n except sushy.exceptions.SushyError as e:\n message_extended_info = e.body.get('@Message.ExtendedInfo')\n error_message = message_extended_info[0]['Message']\n\n msg = (self._(\"Setting the value of Bios attribute \"\n \"'%(atrribute)s' is not succesfull. \"\n \"Error: %(error)s\") %\n {'error': str(error_message), 'atrribute': key})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)",
"def ipmi_setup():\n\n verify_ipmi_user_parm_accepted()",
"def set_interface(self, iface):\n\t\tf = os.path.join(self.config_dir, \"iface-%s\" % LibvirtFile.TEMPLATE_FILE)\n\t\tself.iface_xml = cziso.fill_template(f, iface=iface)",
"def prepare_node(self, node):\n self.interface = IpmiInterface(\n node.get('fencer-ip'),\n node.get('fencer-user'),\n node.get('fencer-password'),\n verbose=CONF.debug)",
"def test_get_bios_boot_mode_by_moid(self):\n pass",
"def setup_vm_env(self, driver='default'):\n if self.env_done:\n return\n\n # bind to default driver\n self.bind_nic_driver(self.dut_ports[:2], driver=\"\")\n\n self.used_dut_port_0 = self.dut_ports[0]\n self.host_intf0 = self.dut.ports_info[self.used_dut_port_0]['intf']\n tester_port = self.tester.get_local_port(self.used_dut_port_0)\n self.tester_intf0 = self.tester.get_interface(tester_port)\n\n self.dut.generate_sriov_vfs_by_port(\n self.used_dut_port_0, 1, driver=driver)\n self.sriov_vfs_port_0 = self.dut.ports_info[\n self.used_dut_port_0]['vfs_port']\n self.vf0_mac = \"00:10:00:00:00:00\"\n self.dut.send_expect(\"ip link set %s vf 0 mac %s\" %\n (self.host_intf0, self.vf0_mac), \"# \")\n\n self.used_dut_port_1 = self.dut_ports[1]\n self.host_intf1 = self.dut.ports_info[self.used_dut_port_1]['intf']\n self.dut.generate_sriov_vfs_by_port(\n self.used_dut_port_1, 1, driver=driver)\n self.sriov_vfs_port_1 = self.dut.ports_info[\n self.used_dut_port_1]['vfs_port']\n tester_port = self.tester.get_local_port(self.used_dut_port_1)\n self.tester_intf1 = self.tester.get_interface(tester_port)\n\n self.vf1_mac = \"00:20:00:00:00:00\"\n self.dut.send_expect(\"ip link set %s vf 0 mac %s\" %\n (self.host_intf1, self.vf1_mac), \"# \")\n\n try:\n\n for port in self.sriov_vfs_port_0:\n port.bind_driver('pci-stub')\n\n for port in self.sriov_vfs_port_1:\n port.bind_driver('pci-stub')\n\n time.sleep(1)\n vf0_prop = {'opt_host': self.sriov_vfs_port_0[0].pci}\n vf1_prop = {'opt_host': self.sriov_vfs_port_1[0].pci}\n\n # set up VM0 ENV\n self.vm0 = QEMUKvm(self.dut, 'vm0', 'vf_vlan')\n self.vm0.set_vm_device(driver='pci-assign', **vf0_prop)\n self.vm0.set_vm_device(driver='pci-assign', **vf1_prop)\n self.vm_dut_0 = self.vm0.start()\n if self.vm_dut_0 is None:\n raise Exception(\"Set up VM0 ENV failed!\")\n\n except Exception as e:\n self.destroy_vm_env()\n raise Exception(e)\n\n self.env_done = True",
"def set_one_time_boot(self, device, mac=None):\n self._update_persistent_boot([device], persistent=False, mac=mac)",
"def setup_device(device):\n try:\n # Gets around \"Resource busy\" errors\n device.detach_kernel_driver(0)\n except Exception:\n pass\n device.set_configuration()",
"def base_setup(self, request, interface_iterate):\n self.interface = interface_iterate\n\n if self.interface.lower() == \"cephfs\":\n self.interface = constants.CEPHFILESYSTEM\n self.sc_obj = constants.DEFAULT_STORAGECLASS_CEPHFS\n if self.interface.lower() == \"rbd\":\n self.interface = constants.CEPHBLOCKPOOL\n self.sc_obj = constants.DEFAULT_STORAGECLASS_RBD",
"def __init__(self, machine):\n super().__init__(machine)\n self.features['has_i2c'] = True",
"def __init__(self, machine):\n super().__init__(machine)\n self.features['has_i2c'] = True",
"def command_setup(self, *args):\n def usage():\n print(self.command_setup.__doc__)\n sys.exit(1)\n\n if len(args) == 0:\n usage()\n\n try:\n # All of these (except mount_opt) map directly to the model properties\n # We allow several `mount_opt` flags and merge their values, before\n # assigning to the `mount_opts` property (which expects a list).\n fields = [\n \"id\", \"host\", \"port\", \"user\",\n \"mount_opt\", \"mount_point\",\n \"ssh_key\", \"cmd_before_mount\",\n \"auth_method\",\n ]\n opts, _ = getopt.getopt(args, \"\", [\"%s=\" % s for s in fields])\n except getopt.GetoptError as e:\n sys.stderr.write('Error: %s\\n\\n' % e)\n usage()\n\n system = SystemModel()\n mount_opts = []\n for name, value in opts:\n name = name.lstrip('-')\n if not hasattr(system, name):\n continue\n if name == 'mount_opt':\n mount_opts.append(value)\n continue\n setattr(system, name, value)\n system.mount_opts = mount_opts\n\n is_valid, errors = system.validate()\n if not is_valid:\n sys.stderr.write('Invalid data found:\\n')\n for field_name, msg in errors:\n sys.stderr.write(' - %s / %s\\n' % (field_name, msg))\n sys.stderr.write('\\n')\n usage()\n sys.exit(1)\n\n system.save(self.environment)\n print('Configuration created.')\n print('You can try mounting now: `sftpman mount %s`' % system.id)",
"def do_setup(self, context):\n super(RBDISCSIDriver, self).do_setup(context)\n if client is None:\n msg = _(\"You must install rbd-iscsi-client python package \"\n \"before using this driver.\")\n raise exception.VolumeDriverException(data=msg)\n\n # Make sure we have the basic settings we need to talk to the\n # iscsi api service\n config = self.configuration\n self.client = self._create_client()\n self.client.set_debug_flag(config.safe_get('rbd_iscsi_api_debug'))\n resp, body = self.client.get_api()\n if not self._is_status_200(resp):\n # failed to fetch the open api url\n raise exception.InvalidConfigurationValue(\n option='rbd_iscsi_api_url',\n value='Could not talk to the rbd-target-api')\n\n # The admin had to have setup a target_iqn in the iscsi gateway\n # already in order for the gateways to work properly\n self.target_iqn = self.configuration.safe_get('rbd_iscsi_target_iqn')\n LOG.info(\"Using target_iqn '%s'\", self.target_iqn)",
"def change_mac(interface, mac):\r\n print(\"Changing MAC-address for \" + interface + \" to \" + mac)\r\n subprocess.call([\"sudo\", \"ifconfig\", interface, \"down\"])\r\n subprocess.call([\"sudo\", \"ifconfig\", interface, \"hw\", \"ether\", mac])\r\n subprocess.call([\"sudo\", \"ifconfig\", interface, \"up\"])",
"def main():\n kernel_params = _parse_kernel_cmdline()\n api_url = kernel_params.get('ipa-api-url')\n if api_url is None:\n _process_error('Mandatory kernel parameter \"ipa-api-url\" is missing.')\n\n boot_mac = kernel_params.get('BOOTIF')\n if boot_mac is None:\n _process_error('Cannot define boot interface, \"BOOTIF\" kernel '\n 'parameter is missing.')\n\n # There is a difference in syntax in BOOTIF variable between pxe and ipxe\n # boot with Ironic.\n # For pxe boot the the leading `01-' denotes the device type (Ethernet)\n # and is not a part of the MAC address\n if boot_mac.startswith('01-'):\n boot_mac = boot_mac[3:].replace('-', ':')\n\n # FIXME(pas-ha) discover all MACs\n node = lookup(api_url, [boot_mac])\n uuid = node['node']['uuid']\n timeout = node['config']['heartbeat_timeout']\n\n heartbeat_url = '{api_url}/v1/heartbeat/{uuid}'.format(api_url=api_url,\n uuid=uuid)\n for n in range(_GET_ADDR_MAX_ITERATION):\n boot_ip = _get_interface_ip(boot_mac)\n if boot_ip is not None:\n break\n time.sleep(_RETRY_INTERVAL)\n else:\n _process_error('Cannot find IP address of boot interface.')\n\n heartbeat(heartbeat_url, boot_ip, timeout)"
] | [
"0.6869196",
"0.6160965",
"0.5724841",
"0.56476253",
"0.5574767",
"0.5545499",
"0.54519165",
"0.5236378",
"0.51754254",
"0.5034209",
"0.5028963",
"0.49785176",
"0.49782223",
"0.49486035",
"0.49466297",
"0.49353293",
"0.4927631",
"0.49271697",
"0.4922526",
"0.49185145",
"0.4895232",
"0.48668995",
"0.48619774",
"0.48566747",
"0.4855276",
"0.4855276",
"0.4834752",
"0.48255107",
"0.48132867",
"0.48129967"
] | 0.79104954 | 0 |
Disable the iSCSI boot option in UEFI boot mode. | def unset_iscsi_boot_info(self, mac):
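        # Disabling iSCSI boot only requires setting iSCSIBootEnable to 'Disabled'.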
        if self._is_boot_mode_uefi():
iscsi_info = {'iSCSIBootEnable': 'Disabled'}
self._change_iscsi_settings(mac.upper(), iscsi_info)
else:
            msg = 'iSCSI boot is not supported in the BIOS boot mode'
raise exception.IloCommandNotSupportedInBiosError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset_secure_boot_keys(self):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('ResetToDefaultKeys', True)\n else:\n msg = ('System is not in UEFI boot mode. \"SecureBoot\" related '\n 'resources cannot be changed.')\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def test_patch_bios_boot_mode(self):\n pass",
"def test_update_bios_boot_mode(self):\n pass",
"def safe_boot_disabled(self, safe_boot_disabled):\n\n self._safe_boot_disabled = safe_boot_disabled",
"def set_boot_device(self, task, device, persistent=False):\n if (getattr(task.node, 'power_interface') == 'ipmitool'\n or task.node.driver_internal_info.get('irmc_ipmi_succeed')):\n if device not in self.get_supported_boot_devices(task):\n raise exception.InvalidParameterValue(_(\n \"Invalid boot device %s specified.\") % device)\n\n uefi_mode = (\n boot_mode_utils.get_boot_mode(task.node) == 'uefi')\n\n # disable 60 secs timer\n timeout_disable = \"0x00 0x08 0x03 0x08\"\n ipmitool.send_raw(task, timeout_disable)\n\n # note(naohirot):\n # Set System Boot Options : ipmi cmd '0x08', bootparam '0x05'\n #\n # $ ipmitool raw 0x00 0x08 0x05 data1 data2 0x00 0x00 0x00\n #\n # data1 : '0xe0' persistent + uefi\n # '0xc0' persistent + bios\n # '0xa0' next only + uefi\n # '0x80' next only + bios\n # data2 : boot device defined in the dict _BOOTPARAM5_DATA2\n\n bootparam5 = '0x00 0x08 0x05 %s %s 0x00 0x00 0x00'\n if persistent:\n data1 = '0xe0' if uefi_mode else '0xc0'\n else:\n data1 = '0xa0' if uefi_mode else '0x80'\n data2 = _BOOTPARAM5_DATA2[device]\n\n cmd8 = bootparam5 % (data1, data2)\n ipmitool.send_raw(task, cmd8)\n else:\n if device not in self.get_supported_boot_devices(task):\n raise exception.InvalidParameterValue(_(\n \"Invalid boot device %s specified. \"\n \"Current iRMC firmware condition doesn't support IPMI \"\n \"but Redfish.\") % device)\n super(ipmitool.IPMIManagement, self).set_boot_device(\n task, device, persistent)",
"def unconfigure_global_dual_active_recovery_reload_disable(device):\n # build a list of commands to send\n # Add stackwise-virtual as first element in the list\n # Enables dual-active recovery-reload\n command_list = ['stackwise-virtual']\n command_list.append(f'no dual-active recovery-reload-disable')\n try:\n output = device.configure(command_list)\n except SubCommandFailure:\n raise SubCommandFailure('Failed to Disable global stackwise-virtual dual-active recovery-reload')\n return output",
"def safe_boot_disabled(self):\n return self._safe_boot_disabled",
"def configure_global_dual_active_recovery_reload_disable(device):\n # build a list of commands to send\n # Add stackwise-virtual as first element in the list\n # Disables dual-active recovery-reload\n command_list = ['stackwise-virtual']\n command_list.append(f'dual-active recovery-reload-disable')\n try:\n output = device.configure(command_list)\n except SubCommandFailure:\n raise SubCommandFailure('Failed to Enable global stackwise-virtual dual-active recovery-reload')\n return output",
"def _DisableRootFsVerification(self):\n # 2 and 4 are the kernel partitions.\n for partition in [2, 4]:\n self.RunCmdOnDevice(['/usr/share/vboot/bin/make_dev_ssd.sh',\n '--partitions', str(partition),\n '--remove_rootfs_verification', '--force'])\n\n # Restart, wait a bit, and re-establish the SSH master connection.\n # We need to close the connection gracefully, then run the shutdown command\n # without using a master connection. port_forward=True bypasses the master\n # connection.\n self.CloseConnection()\n self.RunCmdOnDevice(['reboot'], port_forward=True)\n time.sleep(30)\n self.OpenConnection()",
"def set_boot_mode(self, task, mode):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='set_boot_mode')",
"def setOff(self, command):\r\n self.setDriver('ST', 0)",
"def disable_irq() -> int:",
"def bdev_nvme_disable_controller(client, name, cntlid):\n\n params = {'name': name}\n\n if cntlid is not None:\n params['cntlid'] = cntlid\n\n return client.call('bdev_nvme_disable_controller', params)",
"def systemOff():\n # Updated 11/19/16\n I2C.write_byte_data(Valve_bus, pinOut_O, 0x00 )\n I2C.write_byte_data(Pump_Mag_bus, pinOut_O, 0x00)",
"def _remove_bios_config(task, reboot_flag=False):\n task.node.del_driver_internal_info('irmc_bios_config')\n # NOTE(tiendc): If reboot flag is raised, then the BM will\n # reboot and cause a bug if the next clean step is in-band.\n # See https://storyboard.openstack.org/#!/story/2002731\n if reboot_flag:\n task.node.set_driver_internal_info('cleaning_reboot', True)\n task.node.save()",
"def _nixie_disable():\n # type: () -> None\n GPIO.output(NIXIE_nOE, GPIO.HIGH)",
"def disable():\n if _status_apf():\n return __apf_cmd(\"-f\")",
"def supported_boot_interfaces(self):\n return [fake.FakeBoot] + super().supported_boot_interfaces",
"def soft_shutdown(self, wait_for_board_off=False):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def switch_off(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def set_secure_boot_mode(self, secure_boot_enable):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('SecureBootEnable',\n secure_boot_enable)\n else:\n msg = ('System is not in UEFI boot mode. \"SecureBoot\" related '\n 'resources cannot be changed.')\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def set_pending_boot_mode(self, boot_mode):\n boot_mode = boot_mode.lower()\n if boot_mode not in ['uefi', 'legacy']:\n msg = 'Invalid Boot mode specified'\n raise exception.IloInvalidInputError(msg)\n\n boot_properties = {'BootMode': boot_mode}\n\n if boot_mode == 'legacy':\n boot_properties['BootMode'] = 'LegacyBios'\n else:\n # If Boot Mode is 'Uefi' set the UEFIOptimizedBoot first.\n boot_properties['UefiOptimizedBoot'] = \"Enabled\"\n\n # Change the Boot Mode\n self._change_bios_setting(boot_properties)",
"def turn_off(self):\n self._state = False\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":0 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'): \n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":0 }', 5)",
"def powerOff(self):\n self._sendCommand(self.SONY_CMD_ExtBackupCommunicator_ForcePowerOff, bufferSize=0)",
"def test_get_bios_boot_mode_by_moid(self):\n pass",
"async def async_turn_off(self):\n path = \"/interface\"\n param = \"default-name\"\n if \"-\" in self._data[\"port-mac-address\"]:\n param = \"name\"\n value = self._data[param]\n mod_param = \"disabled\"\n mod_value = True\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n\n if self._data[\"poe-out\"] == \"auto-on\":\n path = \"/interface/ethernet\"\n self._ctrl.set_value(path, param, value, \"poe-out\", \"off\")\n\n await self._ctrl.async_update()",
"def get_boot_device(self, task):\n if (getattr(task.node, 'power_interface') == 'ipmitool'\n or task.node.driver_internal_info.get('irmc_ipmi_succeed')):\n return super(IRMCManagement, self).get_boot_device(task)\n else:\n return super(\n ipmitool.IPMIManagement, self).get_boot_device(task)",
"def _doDisableRegulation(self):\n self._cmdRegulOff()",
"def get_boot_mode(self, task):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='get_boot_mode')",
"def disable_weapon(self, weapon):\n if weapon == \"nothing\":\n weapon = 0\n elif weapon == \"main\":\n weapon = 1\n elif weapon == \"secondary\":\n weapon = 2\n elif weapon == \"everything\":\n weapon = 3\n cmd = '{}testDisableWeaponMode {}'.format(self.console, weapon)\n self.write_command(cmd)"
] | [
"0.608217",
"0.60647833",
"0.60542953",
"0.60381645",
"0.6015318",
"0.59813994",
"0.5816549",
"0.57793945",
"0.5776712",
"0.5750608",
"0.5560355",
"0.5523266",
"0.55037075",
"0.5491119",
"0.5489613",
"0.54749763",
"0.54649585",
"0.5449469",
"0.54409015",
"0.539503",
"0.53881",
"0.53875077",
"0.53786755",
"0.5376367",
"0.5360541",
"0.5359437",
"0.53442544",
"0.53436774",
"0.53394485",
"0.53392154"
] | 0.77110964 | 0 |
Retrieves the current boot mode of the server. | def get_current_boot_mode(self):
boot_mode = self._get_bios_setting('BootMode')
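        # The BIOS reports 'LegacyBios' for legacy mode; normalize it before uppercasing.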
if boot_mode == 'LegacyBios':
boot_mode = 'legacy'
return boot_mode.upper() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode",
"def get_pending_boot_mode(self):\n headers, uri, bios_settings = self._check_bios_resource(['BootMode'])\n _, _, settings = self._get_bios_settings_resource(bios_settings)\n boot_mode = settings.get('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n return boot_mode.upper()",
"def get_boot_mode(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def get_secure_boot_mode(self):\n system = self._get_host_details()\n\n if ('links' not in system['Oem']['Hp'] or\n 'SecureBoot' not in system['Oem']['Hp']['links']):\n msg = ('\"SecureBoot\" resource or feature is not supported'\n ' on this system')\n raise exception.IloCommandNotSupportedError(msg)\n\n secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']\n\n # get the Secure Boot object\n status, headers, secure_boot_settings = self._rest_get(secure_boot_uri)\n\n if status >= 300:\n msg = self._get_extended_error(secure_boot_settings)\n raise exception.IloError(msg)\n\n return secure_boot_settings['SecureBootCurrentState']",
"def get_boot_mode(self, task):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='get_boot_mode')",
"def get_secure_boot_mode(self):\n sushy_system = self._get_sushy_system()\n try:\n secure_boot_enabled = GET_SECUREBOOT_CURRENT_BOOT_MAP.get(\n sushy_system.secure_boot.current_boot)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to provide '\n 'information about secure boot on the server. '\n 'Error: %(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexCommandNotSupportedError(msg)\n\n if secure_boot_enabled:\n LOG.debug(self._(\"Secure boot is Enabled\"))\n else:\n LOG.debug(self._(\"Secure boot is Disabled\"))\n return secure_boot_enabled",
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)",
"def get_mode(self):\r\n return self._api.get_mode()",
"def get_boot_driver(self):\n return self._boot_driver",
"def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')",
"def set_bootloader_mode(self, mode):\n self.check_validity()\n\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 9, 'B')",
"def test_get_bios_boot_mode_by_moid(self):\n pass",
"def test_get_bios_boot_mode_list(self):\n pass",
"def get_secure_boot_state(self, task):\n return irmc_common.get_secure_boot_mode(task.node)",
"def get_one_time_boot(self):\n system = self._get_host_details()\n try:\n if system['Boot']['BootSourceOverrideEnabled'] == 'Once':\n device = system['Boot']['BootSourceOverrideTarget']\n if device in DEVICE_RIS_TO_COMMON:\n return DEVICE_RIS_TO_COMMON[device]\n return device\n else:\n # value returned by RIBCL if one-time boot setting are absent\n return 'Normal'\n\n except KeyError as e:\n msg = \"get_one_time_boot failed with the KeyError:%s\"\n raise exception.IloError((msg) % e)",
"def get_bootvar(self):\n module = 'bootimage/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n bootdefault = response.json()['bootimage']['oper']['hd-default']\n print(self.device + ' The device is set to boot from: ' + bootdefault + ' in the future')\n return bootdefault",
"def is_bootable(self):\n return self.bootable_flag == 0x80",
"def getmode(self):\n return self.mode",
"def get_mode(self):\r\n return self.mode",
"def get_mode(self, ):\n return self.get_parameter('mode')",
"def get_current_mode(self):\n return self.read(0xa2)",
"def get_mode(self, port):\n port = int(port)\n self._validate_port(\"get_mode\", port)\n flags = self._regex_shell_fn(\n self._command_dict[\"GET_MODE\"].format(port),\n self._regex_dict[\"GET_MODE_REGEX\"],\n tries=5)\n\n if \"O\" in flags:\n mode = OFF\n elif \"S\" in flags:\n mode = SYNC\n else:\n mode = CHARGE\n return mode",
"def mode(self):\n return self._data.get('mode', None)",
"def get_app_mode(self):\n\t\treturn call_sdk_function('PrlApi_GetAppMode')",
"def system(self, mode=None):\n if mode == System.AUTO:\n self.change_request[\"SystemSwitch\"] = System.AUTO\n elif mode == System.COOL:\n self.change_request[\"SystemSwitch\"] = System.COOL\n elif mode == System.HEAT:\n self.change_request[\"SystemSwitch\"] = System.HEAT\n elif mode == System.OFF:\n self.change_request[\"SystemSwitch\"] = System.OFF\n else:\n return False\n return self.change_request[\"SystemSwitch\"]",
"def getMode(self):\n return self._mode",
"def hvac_mode(self):\n if self.ac.status is None:\n _LOGGER.debug(f\"hvac_mode: status is None, returning None\")\n return None\n if self.ac.status.is_on:\n ac_mode = self.ac.status.ac_mode\n value = self.HVAC_MODE_MAPPING[ac_mode]\n _LOGGER.debug(f\"hvac_mode: returning {value} (derived from {ac_mode})\")\n return value\n else:\n _LOGGER.debug(f\"hvac_mode: returning HVAC_MODE_OFF - device is off\")\n return HVAC_MODE_OFF",
"def dev_mode(self):\r\n return self._dev_mode",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")",
"def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")"
] | [
"0.77743053",
"0.75218076",
"0.7441925",
"0.7247707",
"0.7066327",
"0.6706312",
"0.6605948",
"0.6499532",
"0.64143753",
"0.6414196",
"0.64062566",
"0.6375331",
"0.6338292",
"0.63119465",
"0.6295608",
"0.6240478",
"0.6215727",
"0.61063474",
"0.60902596",
"0.60828465",
"0.6080002",
"0.6025562",
"0.60031265",
"0.59920686",
"0.59810793",
"0.5972829",
"0.59713215",
"0.5922564",
"0.5886268",
"0.5886268"
] | 0.78080875 | 0 |
Retrieves the pending boot mode of the server, i.e. the boot mode to be applied on the next reset. | def get_pending_boot_mode(self):
headers, uri, bios_settings = self._check_bios_resource(['BootMode'])
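        # Read the pending value from the BIOS settings resource rather than the current one.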
_, _, settings = self._get_bios_settings_resource(bios_settings)
boot_mode = settings.get('BootMode')
if boot_mode == 'LegacyBios':
boot_mode = 'legacy'
return boot_mode.upper() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode",
"def get_current_boot_mode(self):\n boot_mode = self._get_bios_setting('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n\n return boot_mode.upper()",
"def get_boot_mode(self, task):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='get_boot_mode')",
"def get_boot_mode(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def get_secure_boot_mode(self):\n system = self._get_host_details()\n\n if ('links' not in system['Oem']['Hp'] or\n 'SecureBoot' not in system['Oem']['Hp']['links']):\n msg = ('\"SecureBoot\" resource or feature is not supported'\n ' on this system')\n raise exception.IloCommandNotSupportedError(msg)\n\n secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']\n\n # get the Secure Boot object\n status, headers, secure_boot_settings = self._rest_get(secure_boot_uri)\n\n if status >= 300:\n msg = self._get_extended_error(secure_boot_settings)\n raise exception.IloError(msg)\n\n return secure_boot_settings['SecureBootCurrentState']",
"def get_secure_boot_state(self, task):\n return irmc_common.get_secure_boot_mode(task.node)",
"def get_secure_boot_mode(self):\n sushy_system = self._get_sushy_system()\n try:\n secure_boot_enabled = GET_SECUREBOOT_CURRENT_BOOT_MAP.get(\n sushy_system.secure_boot.current_boot)\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish controller failed to provide '\n 'information about secure boot on the server. '\n 'Error: %(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexCommandNotSupportedError(msg)\n\n if secure_boot_enabled:\n LOG.debug(self._(\"Secure boot is Enabled\"))\n else:\n LOG.debug(self._(\"Secure boot is Disabled\"))\n return secure_boot_enabled",
"def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')",
"def set_bootloader_mode(self, mode):\n self.check_validity()\n\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 9, 'B')",
"def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)",
"def set_pending_boot_mode(self, boot_mode):\n boot_mode = boot_mode.lower()\n if boot_mode not in ['uefi', 'legacy']:\n msg = 'Invalid Boot mode specified'\n raise exception.IloInvalidInputError(msg)\n\n boot_properties = {'BootMode': boot_mode}\n\n if boot_mode == 'legacy':\n boot_properties['BootMode'] = 'LegacyBios'\n else:\n # If Boot Mode is 'Uefi' set the UEFIOptimizedBoot first.\n boot_properties['UefiOptimizedBoot'] = \"Enabled\"\n\n # Change the Boot Mode\n self._change_bios_setting(boot_properties)",
"def get_mode(self, port):\n port = int(port)\n self._validate_port(\"get_mode\", port)\n flags = self._regex_shell_fn(\n self._command_dict[\"GET_MODE\"].format(port),\n self._regex_dict[\"GET_MODE_REGEX\"],\n tries=5)\n\n if \"O\" in flags:\n mode = OFF\n elif \"S\" in flags:\n mode = SYNC\n else:\n mode = CHARGE\n return mode",
"def get_mode(self):\r\n return self._api.get_mode()",
"def get_boot_driver(self):\n return self._boot_driver",
"def test_get_bios_boot_mode_by_moid(self):\n pass",
"def test_get_bios_boot_mode_list(self):\n pass",
"def get_one_time_boot(self):\n system = self._get_host_details()\n try:\n if system['Boot']['BootSourceOverrideEnabled'] == 'Once':\n device = system['Boot']['BootSourceOverrideTarget']\n if device in DEVICE_RIS_TO_COMMON:\n return DEVICE_RIS_TO_COMMON[device]\n return device\n else:\n # value returned by RIBCL if one-time boot setting are absent\n return 'Normal'\n\n except KeyError as e:\n msg = \"get_one_time_boot failed with the KeyError:%s\"\n raise exception.IloError((msg) % e)",
"def wait_boot(self) -> int:\n return self._data[ATTR_WAIT_BOOT]",
"def getMode(self):\n with self.lock:\n mode = self.mode\n return mode",
"def get_supported_boot_modes(self, task):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='get_supported_boot_modes')",
"def get_preferred_mode(self):\n ret = self._transfer(TVGetModes())\n return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else None",
"def boot(self):\n\t\tmesslen, received = self.socket.send('bootm\\r', 25)\t\t\n\t\treturn None",
"def get_bootvar(self):\n module = 'bootimage/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n bootdefault = response.json()['bootimage']['oper']['hd-default']\n print(self.device + ' The device is set to boot from: ' + bootdefault + ' in the future')\n return bootdefault",
"def get_current_mode(self):\n return self.read(0xa2)",
"def getMode(self):\n return self._mode",
"def system(self, mode=None):\n if mode == System.AUTO:\n self.change_request[\"SystemSwitch\"] = System.AUTO\n elif mode == System.COOL:\n self.change_request[\"SystemSwitch\"] = System.COOL\n elif mode == System.HEAT:\n self.change_request[\"SystemSwitch\"] = System.HEAT\n elif mode == System.OFF:\n self.change_request[\"SystemSwitch\"] = System.OFF\n else:\n return False\n return self.change_request[\"SystemSwitch\"]",
"def getmode(self):\n return self.mode",
"def preset_mode(self) -> str | None:\n\n if self._device.tcs.system_mode is None:\n return # unable to determine\n # if self._device.tcs.system_mode[CONF_SYSTEM_MODE] in MODE_TCS_TO_HA:\n if self._device.tcs.system_mode[CONF_SYSTEM_MODE] in (\n SystemMode.AWAY,\n SystemMode.HEAT_OFF,\n ):\n return PRESET_TCS_TO_HA[self._device.tcs.system_mode[CONF_SYSTEM_MODE]]\n\n if self._device.mode is None:\n return # unable to determine\n if self._device.mode[CONF_MODE] == ZoneMode.SCHEDULE:\n return PRESET_TCS_TO_HA[self._device.tcs.system_mode[CONF_SYSTEM_MODE]]\n return PRESET_ZONE_TO_HA.get(self._device.mode[CONF_MODE])",
"def get_mode(self):\r\n return self.mode",
"def mode_remote(self):\n self.send(\"!MR\")\n # time.sleep(2.0)\n # No feedback, so query to verify set\n got = self.get_mode()\n assert got == \"R\", got"
] | [
"0.70903546",
"0.69976276",
"0.6860447",
"0.6815118",
"0.66070074",
"0.62555677",
"0.6129522",
"0.6030498",
"0.6028887",
"0.6026956",
"0.6023533",
"0.5965988",
"0.58770573",
"0.5847371",
"0.5799201",
"0.5777145",
"0.5753953",
"0.57251155",
"0.5688327",
"0.56876516",
"0.5649138",
"0.56454",
"0.56306136",
"0.5603256",
"0.5579972",
"0.5577915",
"0.55605006",
"0.554544",
"0.55343264",
"0.552363"
] | 0.8131488 | 0 |
Sets the boot mode of the system for next boot. | def set_pending_boot_mode(self, boot_mode):
    boot_mode = boot_mode.lower()
    if boot_mode not in ['uefi', 'legacy']:
        msg = 'Invalid Boot mode specified'
        raise exception.IloInvalidInputError(msg)

    boot_properties = {'BootMode': boot_mode}

    if boot_mode == 'legacy':
        boot_properties['BootMode'] = 'LegacyBios'
    else:
        # If Boot Mode is 'Uefi' set the UEFIOptimizedBoot first.
        boot_properties['UefiOptimizedBoot'] = "Enabled"

    # Change the Boot Mode
    self._change_bios_setting(boot_properties) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_boot_mode(self, task, mode):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='set_boot_mode')",
"def set_bootloader_mode(self, mode):\n self.check_validity()\n\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 9, 'B')",
"def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')",
"def boot(self, boot):\n\n self._boot = boot",
"def _setBootable(self, bootable):\n if self.partedPartition:\n if arch.isS390():\n return\n if self.flagAvailable(parted.PARTITION_BOOT):\n if bootable:\n self.setFlag(parted.PARTITION_BOOT)\n else:\n self.unsetFlag(parted.PARTITION_BOOT)\n else:\n raise errors.DeviceError(\"boot flag not available for this partition\", self.name)\n\n self._bootable = bootable\n else:\n self.req_bootable = bootable",
"def test_update_bios_boot_mode(self):\n pass",
"def test_patch_bios_boot_mode(self):\n pass",
"def boot(self):\n\n pass",
"def get_boot_mode(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def get_boot_mode(self, task):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='get_boot_mode')",
"def set_automatic(self, mode):\n self.slam.controlled = not mode\n if mode:\n self.slam.resume()",
"def set_one_time_boot(self, device, mac=None):\n self._update_persistent_boot([device], persistent=False, mac=mac)",
"def get_current_boot_mode(self):\n boot_mode = self._get_bios_setting('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n\n return boot_mode.upper()",
"def set_drive_mode(mode):",
"def wait_boot(self, value: int) -> None:\n self._data[ATTR_WAIT_BOOT] = value",
"def setmode(self, mode):\n # ueberpruefe, ob der Modus gueltig ist\n if mode in [GPIO.BCM, GPIO.BOARD]:\n self.mode = mode\n print(f\"Modus auf {mode} gesetzt\")\n else:\n raise ValueError(\"An invalid mode was passed to setmode()\")",
"def get_boot_mode():\n boot_mode = 'Legacy'\n try:\n reg_key = winreg.OpenKey(\n winreg.HKEY_LOCAL_MACHINE, r'System\\CurrentControlSet\\Control')\n reg_value = winreg.QueryValueEx(reg_key, 'PEFirmwareType')[0]\n if reg_value == 2:\n boot_mode = 'UEFI'\n except:\n boot_mode = 'Unknown'\n\n return boot_mode",
"def set_secure_boot_mode(self, secure_boot_enable):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('SecureBootEnable',\n secure_boot_enable)\n else:\n msg = ('System is not in UEFI boot mode. \"SecureBoot\" related '\n 'resources cannot be changed.')\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def _select_mode(self):\n self.__check_mode()\n if self.mode[\"auto_mode\"]:\n self.mode_auto()\n elif self.mode[\"auto_mode\"] is None: # Do Nothing\n self.mode_standby()\n else:\n self.mode_manual()",
"def setBootargs(self):\n\t\tif self.testType == 'auto' or self.testType == 'manual':\n\t\t\tself.bootargs = self.settings.getKeyValue('nfs.fs')\n\t\t\tself.bootargs = self.bootargs.replace('<nfsroot>', self.nfsroot)\n\t\t\tself.bootargs = self.bootargs.replace('<ipaddr>', self.ipaddr)\n\t\t\tself.socket.send('setenv bootargs ' + self.bootargs+'\\r', 1)\n\t\telse:\n\t\t\tself.bootargs = self.settings.getKeyValue('ramdisk.fs')\n\t\t\tself.bootargs = self.bootargs.replace('<ipaddr>', self.ipaddr)\n\t\t\tself.socket.send('setenv bootargs ' + self.bootargs+'\\r', 1)\t\t\n\t\treturn None",
"def get_pending_boot_mode(self):\n headers, uri, bios_settings = self._check_bios_resource(['BootMode'])\n _, _, settings = self._get_bios_settings_resource(bios_settings)\n boot_mode = settings.get('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n return boot_mode.upper()",
"def set_boot_order(profile_obj):\n status = True\n logger._log_to_console_and_log_file(\"\")\n logger._log_to_console_and_log_file(\"### Testing the 'Boot Settings' session ###\")\n logger._log_to_console_and_log_file(\"- Select the 'Legacy BIOS' mode\")\n createprofile_elements = ProfileContainer(ProfileContainerType.ADD)\n __select_value_from_a_profile_combo_box(createprofile_elements.ID_COMBO_PROFILE_BOOT_MODE, createprofile_elements.ID_COMBO_PROFILE_BOOT_MODE_LIST % \"Legacy BIOS\")\n # Set invalid values\n logger._log_to_console_and_log_file(\"Testing using invalid values\")\n for profile in profile_obj:\n items = [[\"CD\", profile.cd], [\"USB\", profile.usb], [\"HardDisk\", profile.harddisk]]\n for data in items:\n ui_lib.wait_for_element_and_input_text(\"name=%s\" % data[0], data[1])\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_CREATE_SERVER_PROFILE_FORM)\n if data[0] == \"HardDisk\":\n data[0] = \"Hard Disk\"\n if ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_BOOT_ORDER_POSITION % data[0], data[1], timeout=1):\n logger._log_to_console_and_log_file(\"- \" + \"'\" + data[0] + \"'\" + \" field was not cleared to the default value and persisted as '\" + str(data[1]) + \"'\")\n status = False\n else:\n logger._log_to_console_and_log_file(\"- \" + \"'\" + data[0] + \"'\" + \" field was correctly cleared to the default value\")\n return status",
"def mode(self, mode):\n self.set_mode(mode)",
"def set_mode(self, mode):\n if mode in self.MODES:\n self.mode = self.MODES[mode]",
"def set_mode(self, mode):\n print('set_mode', mode)\n self._mode = int(mode)",
"def set_manual_mode(self):\n self._kernel.set_manual_mode()",
"def setbacklight(self, backlight=True):\n if backlight:\n self._backlight = 0x08\n else:\n self._backlight = 0x00\n\n self.lcd_byte(0x00 ,LCD_CMD)",
"def set_mode(self, mode=0, detection_param=0):\r\n return self._arm.set_mode(mode=mode, detection_param=detection_param)",
"def set_vm_status(self, boot_on_next_reset):\n data = {\n \"Oem\": {\n \"Hpe\": {\n \"BootOnNextServerReset\": boot_on_next_reset\n }\n }\n }\n self._conn.patch(self.path, data=data)",
"def set_preset_mode(self, preset_mode: str | None) -> None:\n self.svc_set_system_mode(PRESET_TO_TCS.get(preset_mode, SystemMode.AUTO))"
] | [
"0.7535342",
"0.7406527",
"0.7319358",
"0.6997932",
"0.66406643",
"0.6565158",
"0.6410105",
"0.6125415",
"0.6046522",
"0.60436237",
"0.6038874",
"0.6014684",
"0.6005457",
"0.6002688",
"0.59916985",
"0.59749866",
"0.5971754",
"0.59534365",
"0.5944353",
"0.5900143",
"0.58888465",
"0.5875209",
"0.58701897",
"0.5841792",
"0.58071274",
"0.57710737",
"0.57598937",
"0.5713621",
"0.5710734",
"0.570695"
] | 0.7734424 | 0 |
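The `set_pending_boot_mode` record above reduces to a small mapping from a user-facing mode name to iLO BIOS properties. Below is a minimal standalone sketch of that mapping; the helper name `build_boot_properties` is hypothetical, and the real method applies the result through its private `_change_bios_setting` call rather than returning it.

```python
def build_boot_properties(boot_mode):
    """Translate a user-facing boot mode into iLO BIOS properties."""
    boot_mode = boot_mode.lower()
    if boot_mode not in ('uefi', 'legacy'):
        raise ValueError('Invalid Boot mode specified')

    if boot_mode == 'legacy':
        # iLO expects the literal value 'LegacyBios' for legacy boot.
        return {'BootMode': 'LegacyBios'}
    # For UEFI, UefiOptimizedBoot is enabled alongside the mode switch.
    return {'BootMode': 'uefi', 'UefiOptimizedBoot': 'Enabled'}


assert build_boot_properties('Legacy') == {'BootMode': 'LegacyBios'}
assert build_boot_properties('UEFI') == {'BootMode': 'uefi',
                                         'UefiOptimizedBoot': 'Enabled'}
```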
Resets the iLO password. | def reset_ilo_credential(self, password):
    acc_uri = '/rest/v1/AccountService/Accounts'

    for status, hds, account, memberuri in self._get_collection(acc_uri):
        if account['UserName'] == self.login:
            mod_user = {}
            mod_user['Password'] = password
            status, headers, response = self._rest_patch(memberuri,
                                                         None, mod_user)
            if status != 200:
                msg = self._get_extended_error(response)
                raise exception.IloError(msg)
            return

    msg = "iLO Account with specified username is not found."
    raise exception.IloError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(ctx):\n\n controller = ctx.obj['controller']\n click.echo('Resetting OATH data...')\n old_id = controller.id\n controller.reset()\n\n settings = ctx.obj['settings']\n keys = settings.setdefault('keys', {})\n if old_id in keys:\n del keys[old_id]\n settings.write()\n\n click.echo(\n 'Success! All OATH credentials have been cleared from your YubiKey.')",
"def reset_password(self):\n self.password = passwordResetter(self.user_id, self.password)",
"def setpassword(self, pwd):\n pass",
"def reset_password(newpass, challenge):",
"def reset_password():\n pass",
"def reset(self):\n\n\t\tself._send_message(\"RESET\", \"\\x00\")",
"def password(self, password):\n self.password_hash = generate_password_hash(password)\n self.password_set = True",
"def clear_password(self, e):\n\n self.password.label.config(show='*')\n if self.password.get() == 'Enter Enovia Password':\n self.password.clear()",
"def reset(serial):\n if click.confirm(\n \"Warning: Your credentials will be lost!!! Do you wish to continue?\"\n ):\n print(\"Press the button to confirm -- again, your credentials will be lost!!!\")\n solo.client.find(serial).reset()\n click.echo(\"....aaaand they're gone\")",
"def reset(serial):\n if click.confirm(\n \"Warning: Your credentials will be lost!!! Do you wish to continue?\"\n ):\n print(\"Press the button to confirm -- again, your credentials will be lost!!!\")\n solo.client.find(serial).reset()\n click.echo(\"....aaaand they're gone\")",
"def _set_password(self, password):\n self._password = generate_password_hash(password)",
"def LdapResetPassword(self, record):\n password = self.login_pwd.generate_password()\n attrs = {}\n attrs['userPassword'] = self.login_pwd.encrypt_password(password)\n logger.debug(\"LDAP LdapResetPassword encrypt_password %s\"\n % (attrs['userPassword']))\n result = self.LdapModifyUser(record, attrs)\n return result",
"def set_pass(self, pw):\n\t\tself.passhash = generate_password_hash(pw)",
"def reset(self):\n self.state = \"YYYYRRRRGGGGOOOOBBBBWWWW\"",
"def setPassword(self, unhashPass):\n\t\tself.passHash = generate_password_hash(unhashPass)",
"def reset_merchant_pass(self, newpass):\n self.refresh()\n if not newpass:\n raise ValueError(\"Password must be defined\")\n\n updateshopobj = self.sc.get_updateshop_obj(\n {\n 'Alias': self.Alias,\n 'MerchantPassword': newpass,\n }\n )\n self.sc.update(updateshopobj)\n self.refresh()",
"def resetPassword(self, customerguid, password, jobguid=\"\", executionparams=None):",
"def test_010_change_user_password(self):\n\n testflow.step(\"Resetting password for user %s\", TEST_USER1)\n assert USER_CLI.run(\n 'password-reset',\n TEST_USER1,\n password='pass:%s' % self.user_password,\n password_valid_to='2100-01-01 11:11:11Z',\n )[0], \"Failed to change user's '%s' password\" % TEST_USER1",
"def reset(ctx, force):\n\n force or click.confirm(\n \"WARNING! This will delete all stored OATH accounts and restore factory \"\n \"settings. Proceed?\",\n abort=True,\n err=True,\n )\n\n session = ctx.obj[\"session\"]\n click.echo(\"Resetting OATH data...\")\n old_id = session.device_id\n session.reset()\n\n keys = ctx.obj[\"oath_keys\"]\n if old_id in keys:\n del keys[old_id]\n keys.write()\n logger.info(\"Deleted remembered access key\")\n\n click.echo(\"Success! All OATH accounts have been deleted from the YubiKey.\")",
"def set_password(self, password):\n self.PASS = password",
"def reset_password(user: User) -> Result[Password]:\n passwd = Password.new()\n command([\"/usr/sbin/chpasswd\"], passwd.wrap(\"{}:{{}}\".format(user.pw_name)))\n return Result(State.success, passwd)",
"def reset(self):\n self._keyCode = \"\"\n self._keyCodeCount = 0\n self._keyCodeTime = 0.0",
"def reset(self):\n self.string = self.axiom",
"def reset(self) -> None:\n self.memory = self.intcode.copy()\n self.ip = 0\n self.stdout.clear()",
"def reset(self):\n self.check_validity()\n\n self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_RESET, (), '', 0, '')",
"def set_password(self, password):\n self.PASSWORD = password",
"def set_password(self, password):\n self.cloudserver.change_password(password)",
"def reset(self):\n self._write(0x16, 1, 3, 0x08)",
"def set_password(self, password):\n self.password = generate_password_hash(password)",
"def set_password(self, new_password):\n super(Mafiasi, self).set_password(new_password)\n self.new_password = new_password"
] | [
"0.71026045",
"0.7095906",
"0.65982336",
"0.6317689",
"0.626955",
"0.622285",
"0.62090886",
"0.619031",
"0.6150298",
"0.6150298",
"0.6132683",
"0.6118869",
"0.60761297",
"0.6064993",
"0.605403",
"0.60505545",
"0.60464615",
"0.60406226",
"0.601135",
"0.59997255",
"0.5995621",
"0.59928083",
"0.59907496",
"0.59784484",
"0.5974175",
"0.5958172",
"0.5949247",
"0.5946935",
"0.5936411",
"0.5931389"
] | 0.72471446 | 0 |
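The `reset_ilo_credential` record above walks the iLO account collection and PATCHes the member whose `UserName` matches the current login. A rough `requests`-based equivalent is sketched below; it assumes the RIS collection lists members under `links`/`Member` (as the driver's private `_get_collection` helper does), and the host, auth tuple, and `verify=False` are placeholders for a lab setup with a self-signed iLO certificate.

```python
import requests


def reset_ilo_password(host, login, auth, new_password):
    """Set a new password on the iLO account whose UserName matches login."""
    base = 'https://%s' % host
    # Fetch the accounts collection (assumed layout: links -> Member list).
    accounts = requests.get(base + '/rest/v1/AccountService/Accounts',
                            auth=auth, verify=False).json()
    for member in accounts.get('links', {}).get('Member', []):
        account = requests.get(base + member['href'],
                               auth=auth, verify=False).json()
        if account.get('UserName') == login:
            # PATCH only the Password property of the matching account.
            resp = requests.patch(base + member['href'],
                                  json={'Password': new_password},
                                  auth=auth, verify=False)
            resp.raise_for_status()
            return
    raise RuntimeError('iLO Account with specified username is not found.')
```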
Resets the BIOS settings to default values. | def reset_bios_to_default(self):
    # Check if the BIOS resource exists.
    headers_bios, bios_uri, bios_settings = self._check_bios_resource()
    # Get the BaseConfig resource.
    try:
        base_config_uri = bios_settings['links']['BaseConfigs']['href']
    except KeyError:
        msg = ("BaseConfigs resource not found. Couldn't apply the BIOS "
               "Settings.")
        raise exception.IloCommandNotSupportedError(msg)

    # Check if the BIOS resource supports PATCH; else fall back to the
    # settings resource and validate it instead.
    if not self._operation_allowed(headers_bios, 'PATCH'):
        headers, bios_uri, _ = self._get_bios_settings_resource(
            bios_settings)
        self._validate_if_patch_supported(headers, bios_uri)

    status, headers, config = self._rest_get(base_config_uri)
    if status != 200:
        msg = self._get_extended_error(config)
        raise exception.IloError(msg)

    new_bios_settings = {}
    for cfg in config['BaseConfigs']:
        default_settings = cfg.get('default', None)
        if default_settings is not None:
            new_bios_settings = default_settings
            break
    else:
        msg = ("Default Settings not found in 'BaseConfigs' resource.")
        raise exception.IloCommandNotSupportedError(msg)

    request_headers = self._get_bios_hash_password(self.bios_password)
    status, headers, response = self._rest_patch(bios_uri, request_headers,
                                                 new_bios_settings)
    if status >= 300:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\r\n # TODO: have reset flag such that it forces all the bottom changes\r\n self.pwm_freq = self._default[\"pwm_freq\"]\r\n self.gate_logic = self._default[\"gate_logic\"]\r\n self.max_pwm = self._default[\"max_pwm\"]\r\n self.lase_on_power_up = self._default[\"lase_on_power_up\"]\r\n\r\n self.mode = self._default[\"mode\"]\r\n self.lase = self._default[\"lase\"]\r\n self.percent = self._default[\"percent\"] # in percent\r",
"def reset(self):\n self.manager.delete_all()\n for name, val in DEFAULT_SETTINGS.items():\n val['name'] = name\n val['default_value'] = val['value']\n self.manager.from_dict(val)",
"def SetDefaults():\n winsound.MessageBeep()\n returnValue = MessageBox(0, u\"You are about to reset the settings, \"\n \"are you sure you want to contine?\"\n , u\"Reset settings file?\", 4)\n\n if returnValue == MB_YES:\n\n returnValue = MessageBox(0, u\"Settings successfully restored to default values\"\n , u\"Reset complete!\", 0)\n\n MySet = Settings()\n MySet.Save(settingsFile)",
"def reset_settings():\n settings = Settings()\n settings.reset()\n settings.save()",
"def reset():\n if os.name == \"posix\": #In linux\n os.system(\"clear\")\n elif os.name == (\"ce\", \"nt\", \"dos\"): #In windows\n os.system(\"cls\")",
"def reset(self):\n self.settings = None\n self.sublime_settings = None\n self.settings_base = \"Javatar.sublime-settings\"\n self.sublime_base = \"Preferences.sublime-settings\"",
"def reset( self ):\n self.conf = self.defaults",
"def reset(self):\n\n game.reset()\n sm.get_screen('game_screen').reset()",
"def reset(self):\n self._unset_defaults_and_overrides()\n self.clear()",
"def reset(self):\n self.reset_dev_via_serial(self.forced_reset_timeout)",
"def resetStoredDefaults( self ):\n keys= list( self._defDict.keys() )\n data= [ self._defDict[ aKey ] for aKey in keys ]\n \n self.prefObj.save( group= self.prefGroup, name= keys, data= data )\n self.resetSelfWithDefaults()",
"def restore_defaults(self):\n if messagebox.askyesno(\n message='Are you sure? '\n 'ALL SETTINGS will be reset to game defaults.\\n'\n 'You may need to re-install graphics afterwards.',\n title='Reset all settings to Defaults?', icon='question'):\n self.lnp.restore_defaults()\n messagebox.showinfo(\n self.root.title(),\n 'All settings reset to defaults!')",
"def reset(self):\n self._write(0x16, 1, 3, 0x08)",
"def reset_state(self):\n for name in self._buffers:\n self._buffers[name] = self._defaults[name]",
"def reset_to_factory(self):\n self._log_msg_start(\"Reset to factory settings\")\n # Order of execution is clear, save, load. This will copy the factory default\n # settings from ROM to flash, load from flash, and activate.\n device_mask_dict = dict(\n deviceDevBbr=1, # devSpiFlash device battery backed RAM\n deviceDevFlash=1, # device Flash\n deviceDevEeprom=1, # device EEPROM\n deviceDeviceSpiFlash=1, # device SPI Flash\n )\n # self._ubx.send(\n # \"CFG-CFG\",\n # clearMask=0xFFFF,\n # saveMask=0xFFFF,\n # loadMask=0xFFFF,\n # deviceMask=device_mask_dict,\n # )\n self._ubx.send(\n \"CFG-CFG\",\n clearMask=0xFFFF,\n saveMask=0x0000,\n loadMask=0xFFFF,\n deviceMask=device_mask_dict,\n )\n self._ubx.send(\n \"CFG-CFG\",\n clearMask=0x0000,\n saveMask=dict(\n msgConf=1,\n ),\n loadMask=dict(),\n deviceMask=device_mask_dict,\n )",
"def hard_reset(self) -> None:\n os.system('rm -fr \"$HOME/.daf/\"')",
"async def reset(self, ctx):\n await self.config.clear_all_guilds()\n await ctx.send(\"Reset all settings to default values.\")",
"def resetDeviceStates(self):",
"def reset_secure_boot_keys(self):\n if self._is_boot_mode_uefi():\n self._change_secure_boot_settings('ResetToDefaultKeys', True)\n else:\n msg = ('System is not in UEFI boot mode. \"SecureBoot\" related '\n 'resources cannot be changed.')\n raise exception.IloCommandNotSupportedInBiosError(msg)",
"def restore():\r\n\tglobal mhp, php, men, pen\r\n\tmhp = 100\r\n\tphp = 100\r\n\tmen = 100\r\n\tpen = 100",
"def reset(self):\n\n\t\tself._send_message(\"RESET\", \"\\x00\")",
"def reset(self):\n self.rst.value(0) # RST on\n self.sleep_us(100) # reset impulse has to be >100 ns and <100 ms\n self.rst.value(1) # RST off\n # Defaults after reset:\n self.power = self.POWER_DOWN\n self.addressing = self.ADDRESSING_HORIZ\n self.instr = self.INSTR_BASIC\n self.display_mode = self.DISPLAY_BLANK\n self.temp_coeff = self.TEMP_COEFF_0\n self.bias = self.BIAS_1_11\n self.voltage = 3060",
"def reset(self):\n self.data = self._defaults",
"async def _reset_settings(self, ctx):\n data = await self.get_data(ctx)\n await data.Settings.clear()\n msg = (\"{0.name} ({0.id}) reset all \"\n \"casino settings.\").format(ctx.author)\n await ctx.send(msg)",
"def reset(self):\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.val_loss.reset_states()\n self.val_accuracy.reset_states()\n self.train_mIoU.reset_states()\n self.val_mIoU.reset_states()",
"def reset(self):\n self.params.resetParams()",
"def resetSettings(self):\n\n # it does this 4 times because for some reason it would not grab everything one time through. Investigate\n for i in range(4):\n\n networkNode = self.returnNetworkNode\n attrs = cmds.listAttr(networkNode, ud=True)\n\n for attr in attrs:\n attrType = str(cmds.getAttr(networkNode + \".\" + attr, type=True))\n\n if attrType == \"double\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, 0, lock=True)\n\n if attrType == \"bool\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, True, lock=True)\n\n if attrType == \"enum\":\n cmds.setAttr(networkNode + \".\" + attr, lock=False)\n cmds.setAttr(networkNode + \".\" + attr, 0, lock=True)\n\n # relaunch the UI\n self.updateSettingsUI()\n self.applyModuleChanges(self)",
"def _soft_reset(self):\n self._reset_specific_envs(self.episodes_done)\n self._update_other_info()",
"def _reset(self):\n self._interface.set('fw_wp_en', 'off')",
"def UnsetWiredDefault(self):\n config = ConfigParser.ConfigParser()\n config.read(self.wired_conf)\n profileList = config.sections()\n for profile in profileList:\n if config.has_option(profile, \"default\"):\n if misc.to_bool(config.get(profile, \"default\")):\n config.set(profile, \"default\", False)\n config.write(open(self.wired_conf, \"w\"))\n self.SaveWiredNetworkProfile(profile)"
] | [
"0.68796086",
"0.66056365",
"0.65619004",
"0.65489376",
"0.6536054",
"0.6508475",
"0.64923877",
"0.6490318",
"0.6390865",
"0.6369686",
"0.63630855",
"0.6360946",
"0.6344241",
"0.6342181",
"0.6341036",
"0.63164306",
"0.6314139",
"0.62830174",
"0.62667483",
"0.62354726",
"0.620782",
"0.61914915",
"0.6181724",
"0.6167523",
"0.6161257",
"0.6152776",
"0.61489147",
"0.6137172",
"0.61319363",
"0.6130049"
] | 0.68314517 | 1 |
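The core of the `reset_bios_to_default` record above is the scan of the `BaseConfigs` resource for the factory-default settings block. A minimal sketch of just that lookup, with a hand-built stand-in for the parsed resource:

```python
def extract_default_settings(base_configs_resource):
    """Return the factory-default settings block from a BaseConfigs dict."""
    for cfg in base_configs_resource.get('BaseConfigs', []):
        default_settings = cfg.get('default')
        if default_settings is not None:
            return default_settings
    raise LookupError("Default Settings not found in 'BaseConfigs' resource.")


sample = {'BaseConfigs': [{'default': {'BootMode': 'Uefi',
                                       'Sriov': 'Enabled'}}]}
assert extract_default_settings(sample)['Sriov'] == 'Enabled'
```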
Gets the iLO firmware version for server capabilities. | def _get_ilo_firmware_version(self):
    manager, reset_uri = self._get_ilo_details()
    ilo_firmware_version = manager['Firmware']['Current']['VersionString']
    return {'ilo_firmware_version': ilo_firmware_version} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def firmware_version(self):\n return self._get_system_status()[\"firmware\"]",
"def get_ilo_firmware_version_as_major_minor(self):\n try:\n manager, reset_uri = self._get_ilo_details()\n ilo_fw_ver_str = (\n manager['Oem']['Hp']['Firmware']['Current']['VersionString']\n )\n return common.get_major_minor(ilo_fw_ver_str)\n except Exception:\n return None",
"def fw_version(self):\n return self.capabilities.get(\"fw_ver\")",
"def hw_version(self) -> str | None:\n return self.status.get(\"FIRMWARE\")",
"def firmware_version(self):\n return self.data.get('fw_ver')",
"def get_firmware_version():\r\n return utils.run('crossystem fwid').stdout.strip()",
"def firmware_version(self):\n buf = (ctypes.c_char * self.MAX_BUF_SIZE)()\n self._dll.JLINKARM_GetFirmwareString(buf, self.MAX_BUF_SIZE)\n return ctypes.string_at(buf).decode()",
"def hardware_version(self):\n return self.data.get('hw_ver')",
"def test_get_hyperflex_server_firmware_version_by_moid(self):\n pass",
"def get_firmware_version(self):\n fw_version = {\n \"BIOS\": self._api_helper.read_txt_file(BIOS_VER_PATH),\n \"BMC\": self.__get_bmc_ver(),\n \"SWITCH_CPLD1\": self.__get_cpld_ver(SW_CPLD1_VER_PATH),\n \"SWITCH_CPLD2\": self.__get_cpld_ver(SW_CPLD2_VER_PATH),\n }.get(self.name, \"Unknown\")\n\n return fw_version",
"def get_hardware_revision():\n return _pigpio_command(_control, _PI_CMD_HWVER, 0, 0)",
"def firmware_version(self):\n return self._read(MX_FIRMWARE_VERSION)",
"async def get_firmware_version(self):\n current_time = time.time()\n if self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE) == '':\n await self._send_sysex(PrivateConstants.REPORT_FIRMWARE, None)\n while self.query_reply_data.get(\n PrivateConstants.REPORT_FIRMWARE) == '':\n elapsed_time = time.time()\n if elapsed_time - current_time > 2:\n return None\n await asyncio.sleep(self.sleep_tune)\n reply = ''\n for x in self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE):\n reply_data = ord(x)\n if reply_data:\n reply += chr(reply_data)\n self.query_reply_data[PrivateConstants.REPORT_FIRMWARE] = reply\n return self.query_reply_data.get(PrivateConstants.REPORT_FIRMWARE)",
"def getFirmwareVersion(self, board=0):\n return self.callModule('admin', board, 0, 'getVersion')",
"def hardware_version(self) -> str:\n return self.camera_info[\"main_hw_version\"]",
"def get_firmware_version(self):\n request_command = self.parser_invoker.get_firmware_version_command_bytes(self.sequence_id, self.product_id)\n response_command_content = self.connectObj.send_receive_command(request_command)\n return response_command_content",
"def firmware_version(self) -> str:\n return self._firmware_version",
"def get_fw_version(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)",
"def get_version():\n ver = '0.0.0'\n req = restcall(0, 'config', 10.0)\n if req['text'] is not None:\n try: \n tree = ET.fromstring(req['text'])\n ver = tree.findall('app_version')[0].text\n if ver is None:\n ver = '0.0.0'\n _LOGGER.info(\"ISY: firmware version: %s\", ver)\n except ET.ParseError:\n _LOGGER.error(\"No version information found on ISY.\")\n return ver",
"async def get_firmware_version(self):\n if self.debug:\n print(\"Sending GET_FIRMWARE_VERSION\")\n\n response = await self.call_function(_COMMAND_GETFIRMWAREVERSION)\n if response is None:\n raise RuntimeError('Failed to detect the PN532')\n return tuple(response)",
"def firmware(self) -> str:\n return self._device_info[\"Firmware\"]",
"def get_firmware_version(self):\n cmd = protocol.GET_FIRMWARE_VERSION\n response = self.__send_and_receive(cmd)\n\n value = self.__gen_response_value(response)\n if value:\n self.firmware_version = value[0][1:]\n else:\n return False",
"def software_version(self) -> str:\n return self.camera_info[\"main_sw_version\"]",
"def test_get_hyperflex_server_firmware_version_list(self):\n pass",
"def get_firmware_version(self):\n response = self.call_function(PN532_COMMAND_GETFIRMWAREVERSION, 4)\n if response is None:\n raise RuntimeError('Failed to detect the PN532! Make sure there is sufficient power (use a 1 amp or greater power supply), the PN532 is wired correctly to the device, and the solder joints on the PN532 headers are solidly connected.')\n return (response[0], response[1], response[2], response[3])",
"def get_hardware_version(self):\n cmd = protocol.GET_HARDWARE_VERSION\n response = self.__send_and_receive(cmd)\n\n value = self.__gen_response_value(response)\n if value:\n self.hardware_version = value[0][1:]\n else:\n return False",
"def driver_version(self):\n data = fcntl.ioctl(self._fd, _EVIOCGVERSION, '\\x00\\x00\\x00\\x00')\n return struct.unpack(\"i\", data)[0]",
"def fw_ver(self):\n return self._fw_ver",
"def get_version(self):\r\n return self._arm.get_version()",
"def hardware_version(self):\n version = self._dll.JLINKARM_GetHardwareVersion()\n major = version / 10000 % 100\n minor = version / 100 % 100\n return '%d.%02d' % (major, minor)"
] | [
"0.76318765",
"0.74084985",
"0.73684555",
"0.7351483",
"0.7309879",
"0.71681994",
"0.7141852",
"0.696607",
"0.6915562",
"0.690672",
"0.68797165",
"0.6854152",
"0.67555326",
"0.6753871",
"0.67471206",
"0.67280734",
"0.6711157",
"0.6674547",
"0.66333795",
"0.6591106",
"0.6559826",
"0.65548754",
"0.64803994",
"0.6444525",
"0.6432335",
"0.6371255",
"0.6371154",
"0.63599116",
"0.631369",
"0.63107944"
] | 0.7568138 | 1 |
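One of the negatives above applies a `get_major_minor` helper to the same `VersionString`. A hedged sketch of such a parser is shown below; the sample input is illustrative only, since the exact VersionString format varies by iLO generation.

```python
import re


def get_major_minor(version_string):
    """Extract the first 'major.minor' pair from a firmware version string."""
    match = re.search(r'(\d+)\.(\d+)', version_string or '')
    if not match:
        return None
    return '%s.%s' % match.groups()


assert get_major_minor('iLO 4 v2.50') == '2.50'
assert get_major_minor('no digits here') is None
```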
Returns whether SR-IOV is enabled or not. | def _is_sriov_enabled(self):
    return (self._get_bios_setting('Sriov') == 'Enabled') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_enabled(self):",
"def swo_enabled(self):\n return self._swo_enabled",
"def ms_get_rstp_enabled(self):\n self.open_route('/configure/switch_settings', \"Switch\")\n dropdown_value = page_utils.get_dropdown_value(\n self.get_page(),\n var_id='node_group_use_stp')\n return dropdown_value == 'Enable RSTP'",
"def Enabled(self) -> bool:",
"def get_prog_enable(self):\n #en = self._get_prop(\"enabled\")\n #return bool( en == \"true\" )\n if \"enabled\" in self._mydict:\n return bool(self._mydict[\"enabled\"] == \"true\")\n return True",
"def isEnabled(self):",
"def get_isenabled(self):\n return self.isenabled",
"def isEnabled(self) -> bool:\n ...",
"def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")",
"def enabled(self) -> bool:\n return pulumi.get(self, \"enabled\")",
"def is_scr_res_enabled(self):\n\t\treturn bool(call_sdk_function('PrlVmCfg_IsScrResEnabled', self.handle))",
"def enabled(self):\n return True",
"def enabled(self):\n return True",
"def has_stp_cli(self):\n if self.is_escom_l:\n cmd = self.cli(\"show spanning-tree\")\n return \"Spanning tree enabled\" in cmd\n else:\n cmd = self.cli(\"show spanning-tree active\")\n return \" enabled \" in cmd",
"def get_new_config(self):\n app_config = zaza.model.get_application_config(self.application_name)\n return 'enable-sriov', str(not app_config['enable-sriov']['value'])",
"def has_sriovdp_enabled(labels):\n if not labels:\n return False\n\n for label in labels:\n if label.label_key == helm_common.LABEL_SRIOVDP and label.label_value:\n return helm_common.LABEL_VALUE_ENABLED == label.label_value.lower()\n\n # We haven't found the sriovdp node key. Return False\n return False",
"def isSirenActive(self) -> bool:\r\n if self.visprotocol is not None:\r\n return self.visprotocol.isSirenActive()\r\n return False",
"def sso_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"sso_enabled\")",
"def is_enabled(self):\n return self.sdk.is_enabled",
"def enabled(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"enabled\")",
"def enabled(self):\n return self._get('enabled')",
"def is_custom_mode_enabled(self):\n return os.environ.get('SNYK_CUSTOM_MODE', 'false').lower() in ('1', 'yes', 'true')",
"def shortenable(s):\n return s, True",
"def isEnabled(self):\n return self.enabled",
"def getStatus(self):\n return self.enabled",
"def enable_snat(self) -> bool:\n return pulumi.get(self, \"enable_snat\")"
] | [
"0.644866",
"0.63986534",
"0.62832564",
"0.6205629",
"0.6096058",
"0.6000314",
"0.5948032",
"0.5870357",
"0.5815211",
"0.5815211",
"0.5815211",
"0.5815211",
"0.5815211",
"0.5815211",
"0.57979065",
"0.5792855",
"0.5792855",
"0.5763365",
"0.57592833",
"0.5725535",
"0.5724957",
"0.57136196",
"0.5710624",
"0.5689521",
"0.5680936",
"0.5666903",
"0.5659068",
"0.5655457",
"0.56496453",
"0.5640356"
] | 0.78212315 | 0 |
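The SR-IOV record above is a one-line string comparison on a BIOS setting. A standalone sketch with the BIOS read stubbed out as a plain dict:

```python
def is_sriov_enabled(bios_settings):
    """True when the 'Sriov' BIOS setting reads 'Enabled'."""
    return bios_settings.get('Sriov') == 'Enabled'


assert is_sriov_enabled({'Sriov': 'Enabled'})
assert not is_sriov_enabled({'Sriov': 'Disabled'})
```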
Returns the given virtual media device status and device URI | def _get_vm_device_status(self, device='FLOPPY'):
    valid_devices = {'FLOPPY': 'floppy',
                     'CDROM': 'cd'}

    # Check if the input is valid
    if device not in valid_devices:
        raise exception.IloInvalidInputError(
            "Invalid device. Valid devices: FLOPPY or CDROM.")

    manager, uri = self._get_ilo_details()
    try:
        vmedia_uri = manager['links']['VirtualMedia']['href']
    except KeyError:
        msg = ('"VirtualMedia" section in Manager/links does not exist')
        raise exception.IloCommandNotSupportedError(msg)

    for status, hds, vmed, memberuri in self._get_collection(vmedia_uri):
        status, headers, response = self._rest_get(memberuri)
        if status != 200:
            msg = self._get_extended_error(response)
            raise exception.IloError(msg)

        if (valid_devices[device] in
                [item.lower() for item in response['MediaTypes']]):
            vm_device_uri = response['links']['self']['href']
            return response, vm_device_uri

    # Requested device not found
    msg = ('Virtualmedia device "' + device + '" is not'
           ' found on this system.')
    raise exception.IloError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_vmedia_device_uri(self, device):\n\n try:\n sushy_system = self._get_sushy_system()\n uri = utils.get_subresource_path_by(sushy_system, 'VirtualMedia')\n resp = sushy_system._conn.get(uri)\n vmedia_resp = json.loads(resp.text)\n for val in vmedia_resp.get(\"Members\"):\n for key in val:\n if device in val[key]:\n return val[key]\n except sushy.exceptions.SushyError as e:\n msg = (self._('Not able to find find vmedia device URI. Error: '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)",
"def get_vmedia_device_status(self, device=\"cd0\"):\n\n if device not in VALID_VMEDIA_DEVICES:\n raise exception.InvalidInputError(\n \"Invalid device. Valid devices: cd0 or cd1 or hd0 or hd1.\")\n sushy_system = self._get_sushy_system()\n device = VALID_VMEDIA_DEVICES.get(device)\n\n vmedia_device_uri = self.get_vmedia_device_uri(device)\n\n try:\n resp = sushy_system._conn.get(vmedia_device_uri)\n return resp.text\n except sushy.exceptions.SushyError as e:\n msg = (self._('Error: %(error)s') %\n {'error': str(e)})\n raise exception.SDFlexError(msg)",
"def get_vmedia_status(self):\n\n try:\n sushy_system = self._get_sushy_system()\n vmedia_status = sushy_system.vmedia\n except sushy.exceptions.SushyError as e:\n msg = (self._('The vmedia is not found. Error '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n return vmedia_status",
"def get_vm_status(self, device='FLOPPY'):\n response, vm_device_uri = self._get_vm_device_status(device)\n\n # Create RIBCL equivalent response\n # RIBCL provides this data in VM status\n # VM_APPLET = CONNECTED | DISCONNECTED\n # DEVICE = FLOPPY | CDROM\n # BOOT_OPTION = BOOT_ALWAYS | BOOT_ONCE | NO_BOOT\n # WRITE_PROTECT = YES | NO\n # IMAGE_INSERTED = YES | NO\n response_data = {}\n\n if response.get('WriteProtected', False):\n response_data['WRITE_PROTECT'] = 'YES'\n else:\n response_data['WRITE_PROTECT'] = 'NO'\n\n if response.get('BootOnNextServerReset', False):\n response_data['BOOT_OPTION'] = 'BOOT_ONCE'\n else:\n response_data['BOOT_OPTION'] = 'BOOT_ALWAYS'\n\n if response.get('Inserted', False):\n response_data['IMAGE_INSERTED'] = 'YES'\n else:\n response_data['IMAGE_INSERTED'] = 'NO'\n\n if response.get('ConnectedVia') == 'NotConnected':\n response_data['VM_APPLET'] = 'DISCONNECTED'\n # When media is not connected, it's NO_BOOT\n response_data['BOOT_OPTION'] = 'NO_BOOT'\n else:\n response_data['VM_APPLET'] = 'CONNECTED'\n\n response_data['IMAGE_URL'] = response['Image']\n response_data['DEVICE'] = device\n\n # FLOPPY cannot be a boot device\n if ((response_data['BOOT_OPTION'] == 'BOOT_ONCE') and\n (response_data['DEVICE'] == 'FLOPPY')):\n response_data['BOOT_OPTION'] = 'NO_BOOT'\n\n return response_data",
"def new_media_status(self, media_status):\n if (\n media_status\n and media_status.player_is_idle\n and media_status.idle_reason == \"ERROR\"\n ):\n external_url = None\n internal_url = None\n url_description = \"\"\n with suppress(NoURLAvailableError): # external_url not configured\n external_url = get_url(self.hass, allow_internal=False)\n\n with suppress(NoURLAvailableError): # internal_url not configured\n internal_url = get_url(self.hass, allow_external=False)\n\n if media_status.content_id:\n if external_url and media_status.content_id.startswith(external_url):\n url_description = f\" from external_url ({external_url})\"\n if internal_url and media_status.content_id.startswith(internal_url):\n url_description = f\" from internal_url ({internal_url})\"\n\n _LOGGER.error(\n (\n \"Failed to cast media %s%s. Please make sure the URL is: \"\n \"Reachable from the cast device and either a publicly resolvable \"\n \"hostname or an IP address\"\n ),\n media_status.content_id,\n url_description,\n )\n\n self.media_status = media_status\n self.media_status_received = dt_util.utcnow()\n self.schedule_update_ha_state()",
"def get_camera_status():\n\n\ttarget = send_command('getstatus cam')\n\tsplit_ans = target.split()\n\t\n\treturn split_ans",
"def _media_status(self):\n media_status = self.media_status\n media_status_received = self.media_status_received\n\n if (\n media_status is None\n or media_status.player_state == MEDIA_PLAYER_STATE_UNKNOWN\n ):\n groups = self.mz_media_status\n for k, val in groups.items():\n if val and val.player_state != MEDIA_PLAYER_STATE_UNKNOWN:\n media_status = val\n media_status_received = self.mz_media_status_received[k]\n break\n\n return (media_status, media_status_received)",
"def verify_device_dmr(self, device):\n self.assertEqual(device.av_transport_url, AV_TRANSPORT_URL)",
"def getVirtualStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/qemu/%s/status/current' % (node,vmid),None)\n return data",
"def guess_vserver_device():\n\n s = commands.getoutput('/bin/mount | /bin/grep tagxid | /usr/bin/head -n 1')\n device = s.split()[0]\n\n return device",
"def _manufacturer_from_status(status: dict[str, str]) -> str | None:\n return (\n status.get(\"device.mfr\")\n or status.get(\"ups.mfr\")\n or status.get(\"ups.vendorid\")\n or status.get(\"driver.version.data\")\n )",
"def _get_mount_status(self, vm=None):\n result = Shell.run(f\"multipass info {vm} --format=json\")\n\n if f'instance \"{vm}\" does not exist' in result:\n dict_result = {\n 'name': vm,\n 'status': \"instance does not exist\"\n }\n else:\n result = json.loads(result)\n dict_result = {\n 'name': vm,\n 'status': result[\"info\"][vm]['state'],\n 'mounts': result[\"info\"][vm]['mounts']\n }\n return dict_result",
"def get_member_device(self, device):\n for vmedia_device in self.get_members():\n if device in vmedia_device.media_types:\n return vmedia_device",
"def test_device_status(self):\n #071031031E3067\n self.ms.add_response({'\\x14071031031E3067\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.status((49, 3))\n self.assertTrue(response)",
"def _rest_call(self, data, action):\n path = '/wm/device/?ipv4=' + data\n conn = httplib.HTTPConnection(self.host, self.port)\n conn.request('GET', path)\n response = conn.getresponse()\n ret = (response.status, response.reason, response.read())\n conn.close()\n return ret",
"def query_device_handle(runtime, query_str):\r\n devices_manager = runtime.devices_manager\r\n dname, sname = query_str.split('.')\r\n\r\n dev = devices_manager.find_devices(dname)\r\n if dev is None:\r\n print(f'[Debug] Query {dname} from DevicesManager and got None.', file=sys.stderr)\r\n raise ValueError(f'Device {dname} not in database.')\r\n\r\n ret = dev.get_status_value(sname)\r\n if ret is None:\r\n print(f'[Debug] Query {dname}.{sname} from DevicesManager and got None.', file=sys.stderr)\r\n raise ValueError(f'Status {dname}.{sname} not in database.')\r\n\r\n return ret",
"def getDevice(driver):\n devices = list(listDevices(driver))\n if not devices:\n print('No devices found. Ensure your camera is connected.')\n elif len(devices) != 1:\n print('Too many devices found. Only one camera is supported')\n else:\n return devices[0]",
"def ProcessStatusUploadRequest(self, device_status, session_status):\n # Empty responses indicate a successful upload.\n device_status_report_response = dm.DeviceStatusReportResponse()\n session_status_report_response = dm.SessionStatusReportResponse()\n\n response = dm.DeviceManagementResponse()\n response.device_status_report_response.CopyFrom(\n device_status_report_response)\n response.session_status_report_response.CopyFrom(\n session_status_report_response)\n\n return (200, response)",
"async def get_device_status(self, device_id: str) -> dict:\r\n return await self.get(API_DEVICE_STATUS.format(device_id=device_id))",
"def _firmware_from_status(status: dict[str, str]) -> str | None:\n return status.get(\"ups.firmware\") or status.get(\"ups.firmware.aux\")",
"def getDetailedStatus (self):\n try:\n if ((self._activateData == None) or (self._activateData.blockDevice == None)):\n self._log(\"no-block-device-no-status\").debug2(\"no block device was found for file system '%s'\",self._logicalDiskName)\n return None,ReturnCodes.kOk\n\n else:\n terminateTimeOut = self._activeTimeoutsConfig.getStatus\n timer = common.Timer(terminateTimeOut)\n blockDevice = self._activateData.blockDevice\n statusDisctionary,rc = self._tune2fs(blockDevice,timer)\n \n if (rc != ReturnCodes.kOk):\n self._log(\"get-status-failed\").error(\"getDetailedStatus() for file system '%s' failed!\",self._logicalDiskName)\n return None,ReturnCodes.kGeneralError\n\n if (statusDisctionary == None):\n self._log(\"have-block-but-no-status\").debug2(\"block device '%s' for file system '%s' - could not fins file-system status\",blockDevice,self._logicalDiskName)\n else:\n self._log(\"status-found\").debug2(\"block device '%s' for file system '%s' - status found!\",blockDevice,self._logicalDiskName)\n\n return statusDisctionary,rc\n\n except Exception,e:\n self._log(\"get-file-system-status-exception\").error(\"getDetailedStatus(terminateTimeOut=%.2f) faild! exception = '%s'\",terminateTimeOut,e)\n return None,ReturnCodes.kGeneralError",
"def _get_device_info(self) -> NUTDeviceInfo | None:\n if not self._status:\n return None\n\n manufacturer = _manufacturer_from_status(self._status)\n model = _model_from_status(self._status)\n firmware = _firmware_from_status(self._status)\n device_info = NUTDeviceInfo(manufacturer, model, firmware)\n\n return device_info",
"def get(self, devicekey, *args):\n\n base_url = request.url_root[:-1]\n\n out = xml.etree.ElementTree.Element('root')\n out.set('xmlns', \"urn:schemas-upnp-org:device-1-0\")\n\n if devicekey.startswith(self.fhdhr.config.dict[\"main\"][\"uuid\"]):\n origin = devicekey.split(self.fhdhr.config.dict[\"main\"][\"uuid\"])[-1]\n origin_plugin_name = self.fhdhr.origins.origins_dict[origin].plugin_utils.plugin_name\n origin_plugin_version = self.fhdhr.origins.origins_dict[origin].plugin_utils.plugin_manifest[\"version\"]\n\n specVersion_out = sub_el(out, 'specVersion')\n sub_el(specVersion_out, 'major', \"1\")\n sub_el(specVersion_out, 'minor', \"0\")\n\n device_out = sub_el(out, 'device')\n\n sub_el(device_out, 'deviceType', \"urn:plex-tv:device:Media:1\")\n\n sub_el(device_out, 'friendlyName', \"%s %s\" % (self.fhdhr.config.dict[\"fhdhr\"][\"friendlyname\"], origin))\n sub_el(device_out, 'manufacturer', self.fhdhr.config.dict[\"rmg\"][\"reporting_manufacturer\"])\n sub_el(device_out, 'manufacturerURL', \"https://github.com/fHDHR/%s\" % origin_plugin_name)\n sub_el(device_out, 'modelName', self.fhdhr.config.dict[\"rmg\"][\"reporting_model\"])\n sub_el(device_out, 'modelNumber', origin_plugin_version)\n\n sub_el(device_out, 'modelDescription', \"%s %s\" % (self.fhdhr.config.dict[\"fhdhr\"][\"friendlyname\"], origin))\n sub_el(device_out, 'modelURL', \"https://github.com/fHDHR/%s\" % self.fhdhr.config.dict[\"main\"][\"reponame\"])\n\n serviceList_out = sub_el(device_out, 'serviceList')\n service_out = sub_el(serviceList_out, 'service')\n sub_el(out, 'URLBase', \"%s/rmg/%s%s\" % (base_url, self.fhdhr.config.dict[\"main\"][\"uuid\"], origin))\n sub_el(service_out, 'serviceType', \"urn:plex-tv:service:MediaGrabber:1\")\n sub_el(service_out, 'serviceId', \"urn:plex-tv:serviceId:MediaGrabber\")\n\n sub_el(device_out, 'UDN', \"uuid:%s%s\" % (self.fhdhr.config.dict[\"main\"][\"uuid\"], origin))\n\n fakefile = BytesIO()\n fakefile.write(b'<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n fakefile.write(xml.etree.ElementTree.tostring(out, encoding='UTF-8'))\n device_xml = fakefile.getvalue()\n\n return Response(status=200,\n response=device_xml,\n mimetype='application/xml')",
"async def get_status(self) -> str:\n return await self.hw_device.status()",
"def check_media(self, media):\n return AbstractVLC.check_media(self, os.path.join(settings.get(\"path\", \"relative\", \"video\"), media))",
"def test_get_device_presence(self):\n\n device_id = self.properties['device1.id']\n response = self.api.get_device_presence(device_id)\n\n self.assertEqual(device_id, response.sdid, 'Sdids must match')\n self.assertIsNotNone(response.data.last_seen_on, 'last_seen_on')\n self.assertIsNotNone(response.data.connected, 'connected')",
"def media_file_info(self):\n\n if self.observationId and self.playerType == VLC:\n\n media = self.mediaplayer.get_media()\n\n logging.info(\"State: {}\".format(self.mediaplayer.get_state()))\n logging.info(\"Media (get_mrl): {}\".format(bytes_to_str(media.get_mrl())))\n logging.info(\"media.get_meta(0): {}\".format(media.get_meta(0)))\n logging.info(\n \"Track: {}/{}\".format(self.mediaplayer.video_get_track(), self.mediaplayer.video_get_track_count()))\n logging.info(\"number of media in media list: {}\".format(self.media_list.count()))\n logging.info(\"get time: {} duration: {}\".format(self.mediaplayer.get_time(), media.get_duration()))\n logging.info(\"Position: {} %\".format(self.mediaplayer.get_position()))\n logging.info(\"FPS: {}\".format(self.mediaplayer.get_fps()))\n logging.info(\"Rate: {}\".format(self.mediaplayer.get_rate()))\n logging.info(\"Video size: {}\".format(self.mediaplayer.video_get_size(0)))\n logging.info(\"Scale: {}\".format(self.mediaplayer.video_get_scale()))\n logging.info(\"Aspect ratio: {}\".format(self.mediaplayer.video_get_aspect_ratio()))\n logging.info(\"is seekable? {0}\".format(self.mediaplayer.is_seekable()))\n logging.info(\"has_vout? {0}\".format(self.mediaplayer.has_vout()))\n\n vlc_output = (\"State: {}<br>\"\n \"Media Resource Location: {}<br>\"\n \"File name: {}<br>\"\n \"Track: {}/{}<br>\"\n \"Number of media in media list: {}<br>\"\n \"get time: {}<br>\"\n \"duration: {}<br>\"\n \"Position: {} %<br>\"\n \"FPS: {}<br>\"\n \"Rate: {}<br>\"\n \"Video size: {}<br>\"\n \"Scale: {}<br>\"\n \"Aspect ratio: {}<br>\"\n \"is seekable? {}<br>\"\n \"has_vout? {}<br>\").format(self.mediaplayer.get_state(),\n bytes_to_str(media.get_mrl()),\n media.get_meta(0),\n self.mediaplayer.video_get_track(),\n self.mediaplayer.video_get_track_count(),\n self.media_list.count(),\n self.mediaplayer.get_time(),\n self.convertTime(media.get_duration() / 1000),\n self.mediaplayer.get_position(),\n self.mediaplayer.get_fps(),\n self.mediaplayer.get_rate(),\n self.mediaplayer.video_get_size(0),\n self.mediaplayer.video_get_scale(),\n self.mediaplayer.video_get_aspect_ratio(),\n \"Yes\" if self.mediaplayer.is_seekable() else \"No\",\n \"Yes\" if self.mediaplayer.has_vout() else \"No\"\n )\n\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Media file information\")\n self.results.ptText.setReadOnly(True)\n\n self.results.ptText.appendHtml(\"<b>VLC analysis</b><hr>\" + vlc_output)\n\n # FFmpeg analysis\n self.results.ptText.appendHtml(\"<br><b>FFmpeg analysis</b><hr>\")\n for nplayer in self.pj[OBSERVATIONS][self.observationId][FILE]:\n for filePath in self.pj[OBSERVATIONS][self.observationId][FILE][nplayer]:\n media_full_path = project_functions.media_full_path(filePath, self.projectFileName)\n # nframes, duration_ms, duration, fps, hasVideo, hasAudio = accurate_media_analysis(self.ffmpeg_bin, media_full_path)\n\n r = utilities.accurate_media_analysis2(self.ffmpeg_bin, media_full_path)\n nframes = r[\"frames_number\"]\n\n if \"error\" in r:\n self.results.ptText.appendHtml(\n \"File path: {filePath}<br><br>{error}<br><br>\".format(filePath=media_full_path,\n error=r[\"error\"]))\n else:\n self.results.ptText.appendHtml(\n \"File path: {}<br>Duration: {}<br>Bitrate: {}k<br>FPS: {}<br>Has video: {}<br>Has audio: {}<br><br>\".\n format(media_full_path, self.convertTime(r[\"duration\"]), r[\"bitrate\"], r[\"fps\"],\n r[\"has_video\"], r[\"has_audio\"]))\n\n self.results.ptText.appendHtml(\"Total duration: {} (hh:mm:ss.sss)\".\n 
format(self.convertTime(sum(self.duration) / 1000)))\n\n self.results.show()\n\n else:\n\n fn = QFileDialog(self).getOpenFileName(self, \"Select a media file\", \"\", \"Media files (*)\")\n filePath = fn[0] if type(fn) is tuple else fn\n\n if filePath:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Media file information\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(\"<br><b>FFmpeg analysis</b><hr>\")\n # nframes, duration_ms, duration, fps, hasVideo, hasAudio = accurate_media_analysis(self.ffmpeg_bin, filePath)\n r = utilities.accurate_media_analysis2(self.ffmpeg_bin, filePath)\n if \"error\" in r:\n self.results.ptText.appendHtml(\n \"File path: {filePath}<br><br>{error}<br><br>\".format(filePath=filePath, error=r[\"error\"]))\n else:\n self.results.ptText.appendHtml(\n \"File path: {}<br>Duration: {}<br>Bitrate: {}k<br>FPS: {}<br>Has video: {}<br>Has audio: {}<br><br>\".\n format(filePath, self.convertTime(r[\"duration\"]), r[\"bitrate\"], r[\"fps\"], r[\"has_video\"],\n r[\"has_audio\"]))\n\n self.results.show()",
"def device_info(dev, testbed_obj, showcmd='show version', save_to_json=False, logstdout=True):\n\n device = testbed_obj.devices[dev]\n device.connect(log_stdout=logstdout)\n response = device.parse(showcmd)\n print(f\"Response from {dev} is of type {type(response)} and length {len(response)}\")\n print(f\"RAW response: \\n{response}\\n\")\n print(f\"FORMATTED response:\\n{json.dumps(response, indent=4)}\")\n print(response.keys())\n\n if save_to_json:\n json_filename = f\"{dev}.json\"\n with open(json_filename, 'w', encoding='utf-8') as f:\n json.dump(response, f, ensure_ascii=False, indent=4)\n print(f\"\\nFILE SAVED: Saved Response to JSON file {json_filename}\")\n\n return device, response",
"async def _get_scene_device_status(group: int):\n scene = await async_get_scene(group)\n for addr in scene[\"devices\"]:\n device = devices[addr]\n if device:\n await device.async_status()",
"def uber_syntax(self):\n returned = self.get_a_device_id()\n if returned:\n if not self.valid_status_code(uber.command(\"GetDeviceDetails\", ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetails\", body={\"ids\": [DEVICE_ID]})):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetails\", body={\"ids\": DEVICE_ID})):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetails\", parameters={\"ids\": DEVICE_ID})):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetailsV1\", ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(uber.command(\"GetDeviceDetailsV2\", ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(uber.command(\"PostDeviceDetailsV2\", body={\"ids\": [DEVICE_ID]})):\n returned = False\n if not self.valid_status_code(uber.command(\"PostDeviceDetailsV2\", parameters={\"ids\": [DEVICE_ID]})):\n returned = False\n if not self.valid_status_code(uber.command(\"PostDeviceDetailsV2\", ids=DEVICE_ID)):\n returned = False\n if not self.valid_status_code(uber.command(\"PostDeviceDetailsV2\", ids=[DEVICE_ID])):\n returned = False\n\n return returned"
] | [
"0.7345246",
"0.65547144",
"0.652944",
"0.60734904",
"0.5523844",
"0.5463692",
"0.5434447",
"0.5412849",
"0.5385215",
"0.5331038",
"0.5283186",
"0.5263492",
"0.52565235",
"0.5243921",
"0.5218811",
"0.5213309",
"0.5212679",
"0.51299715",
"0.50834125",
"0.5077698",
"0.5065594",
"0.50595325",
"0.5039625",
"0.50188446",
"0.50167763",
"0.5006009",
"0.49876255",
"0.49872783",
"0.4979496",
"0.49682114"
] | 0.70259494 | 1 |
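The `_get_vm_device_status` record above scans the virtual-media collection for a member whose `MediaTypes` covers the requested device. A minimal sketch of that scan over already-parsed member resources; the member dict below is a hand-built stand-in for `_rest_get` output:

```python
VALID_DEVICES = {'FLOPPY': 'floppy', 'CDROM': 'cd'}


def find_vmedia_member_uri(members, device):
    """Return the self-href of the member that supports the given device."""
    if device not in VALID_DEVICES:
        raise ValueError('Invalid device. Valid devices: FLOPPY or CDROM.')
    wanted = VALID_DEVICES[device]
    for member in members:
        media_types = [t.lower() for t in member.get('MediaTypes', [])]
        if wanted in media_types:
            return member['links']['self']['href']
    raise LookupError('Virtualmedia device "%s" is not found.' % device)


members = [{'MediaTypes': ['CD', 'DVD'],
            'links': {'self': {'href': '/rest/v1/Managers/1/VirtualMedia/2'}}}]
assert find_vmedia_member_uri(members, 'CDROM').endswith('/VirtualMedia/2')
```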
Sets the Virtual Media drive status. It sets the boot option for the virtual media device. | def set_vm_status(self, device='FLOPPY',
                  boot_option='BOOT_ONCE', write_protect='YES'):
    # CONNECT is a RIBCL call. There is no such property to set in RIS.
    if boot_option == 'CONNECT':
        return

    boot_option_map = {'BOOT_ONCE': True,
                       'BOOT_ALWAYS': False,
                       'NO_BOOT': False
                       }

    if boot_option not in boot_option_map:
        msg = ('Virtualmedia boot option "' + boot_option + '" is '
               'invalid.')
        raise exception.IloInvalidInputError(msg)

    response, vm_device_uri = self._get_vm_device_status(device)

    # Update required property
    vm_settings = {}
    vm_settings['Oem'] = (
        {'Hp': {'BootOnNextServerReset': boot_option_map[boot_option]}})

    # perform the patch operation
    status, headers, response = self._rest_patch(
        vm_device_uri, None, vm_settings)

    if status >= 300:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enable_vmedia(self, set_vmedia_state):\n\n if not isinstance(set_vmedia_state, bool):\n msg = ('The parameter \"%(parameter)s\" value \"%(value)s\" for '\n 'vmedia is invalid. Valid values are: True/False.' %\n {'parameter': 'ServiceEnabled',\n 'value': set_vmedia_state})\n raise exception.InvalidInputError(msg)\n sushy_system = self._get_sushy_system()\n sdflex_virtual_media.VirtualMedia.enable_vmedia(sushy_system,\n set_vmedia_state)",
"def step7(self):\n for indx, mr in enumerate(self.mrs):\n self.log.info(\"Set boot drive on controller:%d\"\n % (mr.ctrl_id))\n for vd in self.mr_vds[indx]:\n if (int(mr.cli.bootdrive_vd_get()) != vd):\n mr.cli.bootdrive_vd_set(vd_id=self.mr_vds[indx][indx],\n setting=\"On\")\n break",
"def set_vm_status(self, boot_on_next_reset):\n data = {\n \"Oem\": {\n \"Hpe\": {\n \"BootOnNextServerReset\": boot_on_next_reset\n }\n }\n }\n self._conn.patch(self.path, data=data)",
"def set_media_volume_sync(self, dut_name, enable=True):\n try:\n if self.phone_info.phone_type == PhoneType.ANDROID and 'SM' in self._get_android_phone_model():\n is_bt_connected_to_device = self.bt_is_connected_to(dut_name)\n if not is_bt_connected_to_device:\n logger.debug(\n 'For phone found that DUT {} is not connected with {} , '\n 'So Media Volume Sync option is not available '.format(\n dut_name,\n self.phone_info.bluetooth_name))\n return False\n\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.bluetooth_more_options,\n 5)\n self.find_element(self.driver.appium_driver,\n self.bluetooth_more_options, 0).click()\n\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.media_volume_text,\n 10)\n self.find_element(self.driver.appium_driver,\n self.media_volume_text, 0).click()\n\n self.wait_till_element_to_be_visible(self.driver.appium_driver,\n self.media_volume_sync_switch,\n 10)\n\n volume_sync_switch = self.find_element(\n self.driver.appium_driver, self.media_volume_sync_switch,\n 0)\n\n # Now click that button if we're in the wrong state.\n is_media_volume_sync = self._toggle_switch(volume_sync_switch,\n enable)\n self.driver.appium_driver.back()\n logger.debug(\n \"Media Volume option is set to {} on connected bluetooth devices {}\".format(\n enable, dut_name))\n return is_media_volume_sync\n logger.warning(\n \"Media Volume Sync Option is not available on {} connected bluetooth devices\".format(\n self.phone_info.bluetooth_name))\n except Exception as e:\n logger.warning(\n \"Could not enable/disable Media Volume Sync on connected mobile devices {}\"\n .format(self.phone_info.bluetooth_name))\n logger.warning(repr(e))\n return False",
"def set_status(self, status, status_extra, last_command=None, last_device_command=None, delay=None):\n if delay is None:\n delay = 0.100\n\n if last_device_command is not None:\n command = last_device_command.command\n request_id = last_device_command.request_id\n else:\n command = None\n request_id = None\n\n if last_command is not None:\n command = last_command\n\n if status is None:\n self.yombo_device.set_status_delayed(\n delay=delay,\n machine_status_extra=status_extra,\n request_id=request_id,\n reported_by=\"Wemo node\"\n )\n else:\n self.yombo_device.set_status_delayed(\n delay=delay,\n command=command,\n request_id=request_id,\n machine_status=status,\n machine_status_extra=status_extra,\n reported_by=\"Wemo node\"\n )",
"def set_virtual_stage(self, virtual_stage: int) -> None:\n self.virtual_stage = virtual_stage",
"def set_drive_mode(mode):",
"def SetStatus(self, status):\r\n self.status = status",
"def set_status(self, status):\n if status == 'qw':\n status = 'Waiting'\n elif status == 'hqw':\n status = 'Held'\n elif status == 'Eqw':\n status = 'Error'\n else:\n sys.exit(20)\n self.status = status\n return",
"def update(self):\n try:\n if self._remote.power() == 1:\n self._state = STATE_ON\n else:\n self._state = STATE_OFF\n\n # Set TV to be able to remotely power on\n # self._remote.power_on_command_settings(2)\n if self._remote.mute() == 2:\n self._muted = False\n else:\n self._muted = True\n self._volume = self._remote.volume() / 60\n except OSError:\n self._state = STATE_OFF",
"def set_status(self, status):\n self.status = status",
"def set_status(self, status):\n self.status = status",
"def set_status(self, status):\n self.status = status",
"def setstatus(self, status):\n with self.lock:\n self.status = status",
"def _setBootable(self, bootable):\n if self.partedPartition:\n if arch.isS390():\n return\n if self.flagAvailable(parted.PARTITION_BOOT):\n if bootable:\n self.setFlag(parted.PARTITION_BOOT)\n else:\n self.unsetFlag(parted.PARTITION_BOOT)\n else:\n raise errors.DeviceError(\"boot flag not available for this partition\", self.name)\n\n self._bootable = bootable\n else:\n self.req_bootable = bootable",
"def set_status(self, root, status='queued'):\n # Touch the status file\n Path(f'{root}.{status}').touch()",
"def manual_driving(self):\n\n self.start_driving()",
"def set_status(self, status):\n # TODO log to db\n self.status = status",
"def set_states(self) -> None:\n self._attr_state = (\n MediaPlayerState.ON if self._zone.power else MediaPlayerState.OFF\n )\n self._attr_is_volume_muted = self._zone.mute\n self._attr_volume_level = self._zone.volume_as_percentage\n self._attr_media_title = self._zone.input_name\n self._attr_app_name = self._zone.input_format\n self._attr_source = self._zone.input_name\n self._attr_source_list = self.avr.input_list",
"def set_remote_status(self, mode):\n status = {\n 0: \"Local and locked\",\n 1: \"Remote and locked\",\n 2: \"Local and unlocked\",\n 3: \"Remote and unlocked\",\n }\n logging.info(__name__ + ' : Setting remote control status to %s' % status.get(mode, \"Unknown\"))\n self._execute('C%s' % mode)",
"def set_vpn_state(self, status):\n if hasattr(self, status):\n self.change_to(getattr(self, status))",
"def setStatus(self, status):\n self.__status = status",
"def set_autoreboot_status(self, status: int) -> str:\n return self._req_post(self._URLS['SetAutoreboot'], data={\"autoRebootEn\": status, \"delayRebootEn\": True, \"rebootTime\": \"02: 00\"})",
"def setOn(self, command):\r\n self.setDriver('ST', 1)",
"def status(self, cmd):\n\n self.actor.sendVersionKey(cmd)\n self.actor.camera.sendStatusKeys(cmd)\n \n cmd.inform('text=\"Present!\"')\n cmd.finish()",
"def change_status():\n if self.on:\n connect.SOCKET.sendall(bytes(\"OFF\\n\", \"utf-8\"))\n self.on = False\n else:\n connect.SOCKET.sendall(bytes(\"ON\\n\", \"utf-8\"))\n self.on = True",
"def set_directory_status(self, i, status):\n\t\tself.directoryModel.set_value(i, 'directoryStatus', status)",
"def initStatus(status):\n if status == 0 :\n print(\"Supported controller connected\")\n elif status < 0 :\n print(\"No supported controller detected\")\n else:\n print(\"Waiting for controller {}\".format(status) )",
"def drive_mode(self, value):\n self._write(MX_DRIVE_MODE, value)",
"def test_list_drives_drive_firmware_update(self):\n pass"
] | [
"0.6145363",
"0.60608554",
"0.604248",
"0.58181196",
"0.5729332",
"0.56799835",
"0.56557316",
"0.5576355",
"0.5562009",
"0.5548635",
"0.5530569",
"0.5530569",
"0.5530569",
"0.54632413",
"0.54603094",
"0.54399",
"0.5374403",
"0.53504235",
"0.52688646",
"0.5256338",
"0.5245759",
"0.52405393",
"0.5226826",
"0.52200913",
"0.51879424",
"0.51837105",
"0.5166063",
"0.516539",
"0.5162931",
"0.51392525"
] | 0.6670368 | 0 |
Notifies iLO of the location of a virtual media diskette image. | def insert_virtual_media(self, url, device='FLOPPY'):
response, vm_device_uri = self._get_vm_device_status(device)
        # Eject media if there is one. RIBCL was tolerant enough to overwrite
        # existing media; RIS is not, so this check handles that difference.
if response.get('Inserted', False):
self.eject_virtual_media(device)
# Update required property
vm_settings = {}
vm_settings['Image'] = url
# Perform the patch operation
status, headers, response = self._rest_patch(
vm_device_uri, None, vm_settings)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_volume_after_attached_to_vm(self, info, vms):\n path = info[0]['path']\n path_list = path.split(sep='/')\n machine_path_list = [\"~\", \"Home\"]\n machine_path_list.extend(path_list[3:])\n info[0]['machine_path'] = \"/\".join(machine_path_list)\n info[0]['AttachedToVm'] = vms\n info[0]['State'] = 'in-use'\n info[0]['time'] = datetime.datetime.now()\n return info",
"def test_view_volume(self, volume, volumes_steps):\n volumes_steps.view_volume(volume.name)",
"def test_disk(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"file1\"\n self.command.run()\n self.command.finished()\n self.check_diff(\"\"\"\n <ovf:References>\n- <ovf:File ovf:href=\"input.vmdk\" ovf:id=\"file1\" ovf:size=\"{vmdk_size}\" />\n <ovf:File ovf:href=\"input.iso\" ovf:id=\"file2\" ovf:size=\"{iso_size}\" />\n...\n <ovf:Info>Virtual disk information</ovf:Info>\n- <ovf:Disk ovf:capacity=\"1\" ovf:capacityAllocationUnits=\"byte * 2^30\" \\\novf:diskId=\"vmdisk1\" ovf:fileRef=\"file1\" ovf:format=\"http://www.vmware.com/\\\ninterfaces/specifications/vmdk.html#streamOptimized\" />\n </ovf:DiskSection>\n...\n <rasd:AddressOnParent>0</rasd:AddressOnParent>\n- <rasd:ElementName>Hard Drive</rasd:ElementName>\n- <rasd:HostResource>ovf:/disk/vmdisk1</rasd:HostResource>\n- <rasd:InstanceID>6</rasd:InstanceID>\n- <rasd:Parent>3</rasd:Parent>\n- <rasd:ResourceType>17</rasd:ResourceType>\n- </ovf:Item>\n- <ovf:Item>\n- <rasd:AddressOnParent>0</rasd:AddressOnParent>\n <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>\n\"\"\".format(vmdk_size=self.FILE_SIZE['input.vmdk'],\n iso_size=self.FILE_SIZE['input.iso']))\n self.assertFalse(os.path.exists(os.path.join(self.temp_dir,\n \"input.vmdk\")),\n \"deleted file should not be exported\")",
"def command_photo(self, bot, update):\n\n self.send_message(bot, update, \"Not implemented yet.\")",
"def update_volume_after_detach(self, info, vms):\n info[0]['AttachedToVm'] = vms\n if len(vms) == 0:\n info[0]['machine_path'] = None\n info[0]['State'] = 'available'\n info[0]['time'] = datetime.datetime.now()\n return info",
"def eject_virtual_media(self, device='FLOPPY'):\n response, vm_device_uri = self._get_vm_device_status(device)\n\n # Check if virtual media is connected.\n if response.get('Inserted') is False:\n return\n\n # Update required property\n vm_settings = {}\n vm_settings['Image'] = None\n\n # perform the patch operation\n status, headers, response = self._rest_patch(\n vm_device_uri, None, vm_settings)\n\n if status >= 300:\n msg = self._get_extended_error(response)\n raise exception.IloError(msg)",
"def test_aws_service_api_volume_attachment_put(self):\n pass",
"def pv(self, *args, **kwargs):\n return _image.image_pv(self, *args, **kwargs)",
"def attach_volume(self, connection_info, instance, mountpoint):\n instance_name = instance['name']\n vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)\n if vm_ref is None:\n raise exception.InstanceNotFound(instance_id=instance_name)\n # Attach Volume to VM\n LOG.debug(_(\"Attach_volume: %(connection_info)s, %(instance_name)s, \"\n \"%(mountpoint)s\") % locals())\n driver_type = connection_info['driver_volume_type']\n if driver_type not in ['iscsi']:\n raise exception.VolumeDriverNotFound(driver_type=driver_type)\n data = connection_info['data']\n mount_unit = volume_util.mountpoint_to_number(mountpoint)\n\n # Discover iSCSI Target\n device_name, uuid = self.discover_st(data)\n if device_name is None:\n raise volume_util.StorageError(_(\"Unable to find iSCSI Target\"))\n\n # Get the vmdk file name that the VM is pointing to\n hardware_devices = self._session._call_method(vim_util,\n \"get_dynamic_property\", vm_ref,\n \"VirtualMachine\", \"config.hardware.device\")\n vmdk_file_path, controller_key, adapter_type, disk_type, unit_number \\\n = vm_util.get_vmdk_path_and_adapter_type(hardware_devices)\n # Figure out the correct unit number\n if unit_number < mount_unit:\n unit_number = mount_unit\n else:\n unit_number = unit_number + 1\n self.attach_disk_to_vm(vm_ref, instance_name,\n adapter_type, disk_type=\"rdmp\",\n controller_key=controller_key,\n unit_number=unit_number,\n device_name=device_name)\n LOG.info(_(\"Mountpoint %(mountpoint)s attached to \"\n \"instance %(instance_name)s\") % locals())",
"def on_image(self, image):",
"def newMoteDetected(self, mote):\n if self._printSWAP == True:\n print \"New mote with address \" + str(mote.address) + \" : \" + mote.definition.product + \\\n \" (by \" + mote.definition.manufacturer + \")\"",
"def volumes(self):",
"def segment(self):\n warning = QErrorMessage()\n warning.setWindowModality(Qt.WindowModal)\n warning.showMessage('Warning: IVUS Phenotyping is currently only supported for 20MHz images. Interpret other images with extreme caution')\n warning.exec_()",
"def energy_use(update: 'Update', context: 'CallbackContext'):\n bot = context.bot\n chat_id = update.message.chat_id\n url = \"https://vloer.ko-lab.space/verbruikdag.png?random=\" + str(randint(1,9999))\n\n try:\n bot.send_photo(chat_id=chat_id, photo=url)\n except Exception as err:\n msg = \"Oops...something went wrong: {}\".format(err)\n print(msg)\n update.message.reply_text(msg)",
"def mount_root_vm(self):\n print \"montage de la partition root de %s\" % name_vm_dest\n self.exec_cmd(\"mount /dev/%s/root-%s %s\" % (vgname, name_vm_dest, self.rep_vhosts_vm))",
"def set_volume(self, target: int) -> None:\n self.media.set_volume(target)\n self.system.notify(f\"Jarvis::Volume has been set to: {self.media.get_volume()['volume']}%\")",
"def image(self,v):\n self.set('heightfield.image',v)\n #assert fileExists(environment.makeFilePath(v)), \"Warning: HeightField's image file, {}, not found in images folder.\".format(v) \n return self",
"def __mount_ebs_volume( self ):\n ebs_volume_size = self.instance_tag( 'ebs_volume_size' ) or '0'\n ebs_volume_size = int( ebs_volume_size )\n if ebs_volume_size:\n instance_name = self.instance_tag( 'Name' )\n cluster_ordinal = int( self.instance_tag( 'cluster_ordinal' ) )\n volume_name = '%s__%d' % (instance_name, cluster_ordinal)\n volume = EC2VolumeHelper( ec2=self.ec2,\n availability_zone=self.availability_zone,\n name=volume_name,\n size=ebs_volume_size,\n volume_type=\"gp2\" )\n # TODO: handle case where volume is already attached\n device_ext = '/dev/sdf'\n device = '/dev/xvdf'\n volume.attach( self.instance_id, device_ext )\n\n # Wait for inode to appear and make sure its a block device\n while True:\n try:\n assert stat.S_ISBLK( os.stat( device ).st_mode )\n break\n except OSError as e:\n if e.errno == errno.ENOENT:\n time.sleep( 1 )\n else:\n raise\n\n # Only format empty volumes\n volume_label = volume_label_hash( volume_name )\n if check_output( [ 'file', '-sL', device ] ).strip( ) == device + ': data':\n check_call( [ 'mkfs', '-t', 'ext4', device ] )\n check_call( [ 'e2label', device, volume_label ] )\n else:\n # If the volume is not empty, verify the file system label\n actual_label = check_output( [ 'e2label', device ] ).strip( )\n if actual_label != volume_label:\n raise AssertionError(\n \"Expected volume label '%s' (derived from '%s') but got '%s'\" %\n (volume_label, volume_name, actual_label) )\n current_mount_point = self.__mount_point( device )\n if current_mount_point is None:\n mkdir_p( self.persistent_dir )\n check_call( [ 'mount', device, self.persistent_dir ] )\n elif current_mount_point == self.persistent_dir:\n pass\n else:\n raise RuntimeError(\n \"Can't mount device %s on '%s' since it is already mounted on '%s'\" % (\n device, self.persistent_dir, current_mount_point) )\n else:\n # No persistent volume is attached and the root volume is off limits, so we will need\n # to place persistent data on the ephemeral volume.\n self.persistent_dir = self.ephemeral_dir",
"def test_upload_new_vdisk(self, mock_create_file):\n\n # traits are already set to use the REST API upload\n\n # First need to load in the various test responses.\n vg_orig = tju.load_file(UPLOAD_VOL_GRP_ORIG, self.adpt)\n vg_post_crt = tju.load_file(UPLOAD_VOL_GRP_NEW_VDISK, self.adpt)\n\n self.adpt.read.return_value = vg_orig\n self.adpt.update_by_path.return_value = vg_post_crt\n mock_create_file.return_value = self._fake_meta()\n\n n_vdisk, f_wrap = ts.upload_new_vdisk(\n self.adpt, self.v_uuid, self.vg_uuid, None, 'test2', 50,\n d_size=25, sha_chksum='abc123')\n\n # Ensure the create file was called\n mock_create_file.assert_called_once_with(\n self.adpt, 'test2', vf.FileType.DISK_IMAGE, self.v_uuid,\n f_size=50, tdev_udid='0300f8d6de00004b000000014a54555cd9.3',\n sha_chksum='abc123')\n\n # Ensure cleanup was called after the upload\n self.adpt.delete.assert_called_once_with(\n 'File', service='web',\n root_id='6233b070-31cc-4b57-99bd-37f80e845de9')\n self.assertIsNone(f_wrap)\n self.assertIsNotNone(n_vdisk)\n self.assertIsInstance(n_vdisk, stor.VDisk)",
"def update_volumes():\n print 'do something useful here'",
"def add_volume_info(self, vi):\n vol_num = vi.volume_number\n self.volume_info_dict[vol_num] = vi\n if self.fh:\n self.fh.write(vi.to_string() + \"\\n\")",
"def image_received(self, image_message):\n # Convert the image message to something usable by opencv\n # http://wiki.ros.org/cv_bridge/Tutorials/ConvertingBetweenROSImagesAndOpenCVImagesPython\n # Note that mono8 and bgr8 are the two image encodings expected by most OpenCV functions.\n cv_image = self.bridge.imgmsg_to_cv2(image_message, desired_encoding=\"bgr8\")\n image_data = extract_data(cv_image)\n linear_velocity, angular_velocity = self.clf.predict(image_data)\n self.cmd_vel = Twist(linear=Vector3(x=linear_velocity), angular=Vector3(z=angular_velocity))\n rospy.loginfo(self.cmd_vel)",
"def update_info(self):\n # Return if it is locked\n if self.lock:\n return\n # Hide again if it was shown due to an error message\n if self.was_hidden:\n self.was_hidden = False\n self.toggle()\n # Left side\n try:\n # Directory if library is focused\n if self.vimiv.library.treeview.is_focus():\n self.left_label.set_text(os.getcwd())\n # Position, name and thumbnail size in thumb mode\n elif self.vimiv.thumbnail.toggled:\n pos = self.vimiv.get_pos()\n name = os.path.basename(self.vimiv.paths[pos])\n message = \"{0}/{1} {2} {3}\". \\\n format(pos + 1, len(self.vimiv.paths),\n name, self.vimiv.thumbnail.size)\n self.left_label.set_text(message)\n # Image info in image mode\n else:\n name = os.path.basename(self.vimiv.paths[self.vimiv.index])\n message = \"{0}/{1} {2} [{3:.0f}%]\". \\\n format(self.vimiv.index + 1, len(self.vimiv.paths), name,\n self.vimiv.image.zoom_percent * 100)\n self.left_label.set_text(message)\n except:\n self.left_label.set_text(\"No open images\")\n # Center\n if not (self.vimiv.thumbnail.toggled or\n self.vimiv.library.treeview.is_focus()) and self.vimiv.paths:\n mark = \"[*]\" if self.vimiv.paths[self.vimiv.index] \\\n in self.vimiv.mark.marked else \"\"\n else:\n mark = \"\"\n if self.vimiv.slideshow.running:\n slideshow = \"[slideshow - {0:.1f}s]\".format(\n self.vimiv.slideshow.delay)\n else:\n slideshow = \"\"\n message = \"{0} {1}\".format(mark, slideshow)\n self.center_label.set_text(message)\n # Right side\n mode = self.get_mode()\n message = \"{0:15} {1:4}\".format(mode, self.vimiv.keyhandler.num_str)\n self.right_label.set_markup(message)\n # Window title\n try:\n name = os.path.basename(self.vimiv.paths[self.vimiv.index])\n self.vimiv.set_title(\"vimiv - \" + name)\n except:\n self.vimiv.set_title(\"vimiv\")\n # Size of statusbar for resizing image\n self.size = self.vimiv.statusbar.bar.get_allocated_height()",
"def test_items_are_mounted(self):\n response2 = self.client.get(\"/importer/design26/models.py\")\n self.assertEquals(response2.status_code, 200)",
"def on_station_admin_volume_host_path_added(\n self, func,\n ):\n self._set_event_handler(\"stations\")\n self._events.on_station_admin_volume_host_path_added(func)",
"def test_manage_volume_attachments(self, volume, instance, volumes_steps):\n volumes_steps.attach_instance(volume.name, instance.name)\n volumes_steps.detach_instance(volume.name, instance.name)",
"def get_image_path(self):\n\t\treturn call_sdk_function('PrlVmDev_GetImagePath', self.handle)",
"def drive_args(self, image, index):\n index_letter = chr(ord('a') + index)\n image_dir = \"%s/out/target/product/trusty\" % self.config.android\n return [\n \"-drive\",\n \"file=%s/%s.img,index=%d,if=none,id=hd%s,format=raw,snapshot=on\" %\n (image_dir, image, index, index_letter), \"-device\",\n \"virtio-blk-device,drive=hd%s\" % index_letter\n ]",
"def addSquareVignette(size,position,img):\n\n #img[position[1]:min(position[1]+size[1],img.shape[1]),position[0]:min(position[0]+size[0],img.shape[0])]*=0.5\n img[position[1]:position[1]+size[1],position[0]:position[0]+size[0]]*=0.5",
"def viewNMDinVMD(filename):\n\n vmd = pathVMD()\n if vmd:\n os.system('{0} -e {1}'.format(vmd, abspath(filename)))"
] | [
"0.53416073",
"0.50306785",
"0.49840182",
"0.49023584",
"0.48749703",
"0.48598105",
"0.48007807",
"0.4782172",
"0.47274348",
"0.46836528",
"0.46513668",
"0.46505046",
"0.4647543",
"0.46356696",
"0.4622129",
"0.46161428",
"0.46150172",
"0.46038243",
"0.4593726",
"0.4592042",
"0.45886222",
"0.45761573",
"0.45605978",
"0.45576236",
"0.4542211",
"0.45299044",
"0.45179075",
"0.45157865",
"0.44944018",
"0.44839922"
] | 0.52932024 | 1 |
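For illustration, a minimal sketch of the PATCH this method performs, written against a plain requests session. The base URL and device URI below are hypothetical; the real driver resolves the device URI via _get_vm_device_status and issues the call through _rest_patch.

import requests

# Hypothetical endpoint values for illustration only.
ILO_BASE = "https://ilo.example.com"
VMEDIA_URI = "/rest/v1/Managers/1/VirtualMedia/2"

def insert_image(session: requests.Session, image_url: str) -> None:
    # Only the 'Image' property is patched, matching vm_settings above.
    response = session.patch(ILO_BASE + VMEDIA_URI, json={"Image": image_url})
    if response.status_code >= 300:
        raise RuntimeError("iLO returned %d: %s"
                           % (response.status_code, response.text))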
Ejects the Virtual Media image if one is inserted. | def eject_virtual_media(self, device='FLOPPY'):
response, vm_device_uri = self._get_vm_device_status(device)
# Check if virtual media is connected.
if response.get('Inserted') is False:
return
# Update required property
vm_settings = {}
vm_settings['Image'] = None
# perform the patch operation
status, headers, response = self._rest_patch(
vm_device_uri, None, vm_settings)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _handle_removed_media(self):\r\n if self.has_media():\r\n try:\r\n image = str(self.image)\r\n os.remove(image)\r\n except OSError:\r\n raise('Failure trying to remove image from filesystem.')\r\n return True",
"def eject_image(self, identity, device):\n device_info = self._get_device(identity, device)\n\n device_info['Image'] = ''\n device_info['ImageName'] = ''\n device_info['Inserted'] = False\n device_info['WriteProtected'] = False\n device_info['UserName'] = ''\n device_info['Password'] = ''\n\n self._devices.update({(identity, device): device_info})\n\n local_file = device_info.pop('_local_file', None)\n if local_file:\n try:\n os.unlink(local_file)\n\n self._logger.debug(\n 'Removed local file %(file)s for %(identity)s' % {\n 'identity': identity, 'file': local_file})\n except FileNotFoundError:\n # Ignore error as we are trying to remove the file anyway\n pass",
"def delete(self, *args, **kwargs):\n\t\tself.emo_img.delete(False)\n\t\tsuper(Emotion, self).delete(*args, **kwargs)",
"def __on_delete(self):\n self.image.delete()",
"def __on_delete(self):\n self.image.delete()",
"def eject_vmedia(self, device):\n device_name = VALID_VMEDIA_DEVICES.get(device)\n if not device_name:\n raise exception.InvalidInputError(\n \"Invalid device. Valid devices: cd0 or cd1 or hd0 or hd1.\")\n vmedia_partition_id = self.get_vmedia_device_uri(device_name)\n try:\n virtual_media_object = virtual_media.VirtualMedia(\n self._sushy._conn, vmedia_partition_id)\n virtual_media_object.eject_media()\n except sushy.exceptions.SushyError as e:\n msg = (self._('The Redfish System \"%(partition_id)s\" was '\n 'not found. Error %(error)s') %\n {'partition_id': vmedia_partition_id, 'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)",
"def clear_images(self):\r\n\r\n # audio = self.MutagenType(self['filename'])\r\n self.audio.pop(\"metadata_block_picture\", None)\r\n self.audio.pop(\"coverart\", None)\r\n self.audio.pop(\"coverartmime\", None)\r\n self.audio.save()",
"def clear_images(self):\r\n\r\n with translate_errors():\r\n self.audio.clear_pictures()\r\n self.audio.save()\r\n\r\n super().clear_images()",
"def clearImage(self):\n if self.hasImage():\n self.scene.removeItem(self._image)\n self._image = None",
"def delete(self, *args, **kwargs):\n self.image.storage.delete(self.image.name)\n delete(self.image)\n super().delete(*args, **kwargs)",
"def delete(self):\n\t\tif self.hasUdim:\n\t\t\tfor a in self.udimPaths:\n\t\t\t\ta.delete()\n\t\telse:\n\t\t\tsuper( textureFile, self ).delete()",
"def clean(self):\n if self.image:\n self.glance.images.delete(self.image['id'])\n\n if self.image_file:\n shutil.rmtree(self.download_path)",
"def kill_video(self):\n self.cap.truncate(0)\n cv2.destroyAllWindows()",
"def remove_image_file(sender, instance, **kwargs):\n # Pass false so ImageField doesn't save the model.\n instance.image.delete(False)",
"def __del__(self):\n if self.video:\n self.video.release()",
"def tearDown(self):\n self.image.delete()",
"def clearImage(self):\n if self.hasImage():\n self.scene.removeItem(self._pixmapHandle)\n self._pixmapHandle = None\n self.zoom=-1\n self.scene.clear()",
"def delete(self):\n os.remove(self.file_path)\n super(VideoFile, self).delete()",
"def destroy(self):\n url = \"/images/%s/destroy\" % (str(self.id))\n\n data = self._conn.request(url)\n\n log.debug(data)",
"def delete_image(self):\n Image.objects.get(id = self.id).delete()",
"def remove_image(self, imagename, del_img=False):\n os.system('rm -r {}.model'.format(imagename))\n os.system('rm -r {}.flux'.format(imagename))\n os.system('rm -r {}.psf'.format(imagename))\n os.system('rm -r {}.residual'.format(imagename))\n if del_img:\n os.system('rm -r {}.image'.format(imagename))",
"def __del__(self):\n self.vid.release()",
"def auto_delete_image_lecture_on_delete(sender, instance, **kwargs):\n if instance.file:\n instance.file.delete(save=False)",
"def photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)",
"def delImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n img.delete()\n return",
"def clean(context):\n print(f\"Attempting to forcefully remove image {IMAGE_NAME}:{IMAGE_VER}\")\n context.run(f\"docker rmi {IMAGE_NAME}:{IMAGE_VER} --force\")\n print(f\"Successfully removed image {IMAGE_NAME}:{IMAGE_VER}\")",
"def removeScene(self):\n del self.scene, self.imgPixmapItem",
"def remove(self, done=False, verbose=True):\n return _image.image_remove(self, done, verbose)",
"def test_cambia_imagen_elimina_la_antigua(self):\n self.image_path = os.path.join(os.path.dirname(__file__), 'image_for_model2.jpg')\n image_path = self.image_obj.image.path\n self.image_obj.image = simple_uploaded_file(self.image_path)\n self.image_obj.save()\n\n self.assertNotEqual(image_path, self.image_obj.image.path)\n self.assertFalse(os.path.exists(image_path))",
"def __del__(self):\n self.video.release()"
] | [
"0.685616",
"0.6767739",
"0.6732249",
"0.66243017",
"0.66243017",
"0.6524107",
"0.63902587",
"0.6269782",
"0.61779577",
"0.60977596",
"0.60840386",
"0.6020783",
"0.5993817",
"0.5987346",
"0.5948459",
"0.59088904",
"0.5899906",
"0.5896522",
"0.5884364",
"0.58657366",
"0.5865728",
"0.58523566",
"0.5837543",
"0.5822411",
"0.57927376",
"0.5756274",
"0.57558244",
"0.5728832",
"0.5721798",
"0.57136536"
] | 0.7177198 | 0 |
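Ejecting is the symmetric operation: the same PATCH with 'Image' set to null. A sketch under the same hypothetical endpoint assumptions as the insert example above:

import requests

def eject_image(session: requests.Session, base: str, device_uri: str) -> None:
    # Setting Image to None (JSON null) detaches the media, as in the method above.
    response = session.patch(base + device_uri, json={"Image": None})
    if response.status_code >= 300:
        raise RuntimeError("iLO returned %d: %s"
                           % (response.status_code, response.text))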
Get details of persistent boot devices and their order. | def _get_persistent_boot_devices(self):
# Check if the BIOS resource if exists.
headers_bios, bios_uri, bios_settings = self._check_bios_resource()
# Get the Boot resource.
boot_settings = self._get_bios_boot_resource(bios_settings)
# Get the BootSources resource
try:
boot_sources = boot_settings['BootSources']
except KeyError:
msg = ("BootSources resource not found.")
raise exception.IloError(msg)
try:
boot_order = boot_settings['PersistentBootConfigOrder']
except KeyError:
msg = ("PersistentBootConfigOrder resource not found.")
raise exception.IloCommandNotSupportedError(msg)
return boot_sources, boot_order | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_devices(self):\n return [x for x in self.devices.keys()]",
"def getbootinfo(self):\n self.mount()\n kernel = None\n inits = []\n for line in self.xlist(\"get-bootinfo\", IBASE)[1]:\n if line.startswith('+++'):\n kernel = line.split()[1]\n else:\n inits.append(line)\n self.unmount()\n if not inits:\n run_error(_(\"No initramfs found\"))\n return None\n if not kernel:\n run_error(_(\"GRUB problem:\\n\") + inits[0])\n return None\n return (kernel, inits)",
"def get_device_info(handle, timeout):\n device_info = dict()\n device_info['ls'] = ceph_mon_command(handle, 'device ls', timeout)\n\n return device_info",
"def load_devices():",
"def get_boot_record(disk):\n\n #TODO\n return \"Unknown\", \"Unknown\"",
"def get_device_file_dict():\n cmd = 'lshw -class disk'\n desc = \"description\"\n log_name = \"logical name\"\n serial = \"serial\"\n\n dev = []\n dev_list = []\n\n ret, output, err = run_gluster_command(cmd)\n output = output.decode('ASCII')\n dev_info = output.split('\\n')\n for line in dev_info:\n if re.search(desc, line):\n if dev:\n dev_list.append(dev)\n\n dev = []\n if re.search(log_name, line) or re.search(serial, line):\n temp = line.split(':')\n temp[1] = temp[1].strip(' ')\n dev.append(temp[1])\n dev_list.append(dev)\n for line in dev_list:\n print(line)",
"def get_devices(self):\n devices = self.get(\"event/device\")",
"def getDevices(self):\n\n devices = None\n\n for i in range(3):\n devices = subprocess.check_output(\"adb devices -l\", creationflags=self.createNoWindow)\n\n devices = devices.decode()\n deviceModel = re.findall(\"model:(.*) device\", devices)\n deviceID = re.findall(r\"(\\S+) {2}\", devices, flags=re.IGNORECASE)\n\n return deviceModel, deviceID",
"def load_devices(self):\n response = self.oauth.get(url=f'{self.base_url}/json/devices/list')\n\n result = response.json()['device']\n return [(device['id'], device['name'], device['state']) for device in result]",
"def test_get_bios_boot_mode_list(self):\n pass",
"def GetDeviceSerials(cls):\n cls._CheckAdb()\n adb_cmd = [cls._adb_command, _ADB_DEVICE]\n device_info = utils.CheckOutput(adb_cmd)\n serials = []\n # Skip the first line which is \"List of devices attached\". Each of the\n # following lines consists of the serial number, a tab character, and\n # the state. The last line is empty.\n for line in device_info.splitlines()[1:]:\n serial_state = line.split()\n if len(serial_state) > 1:\n serials.append(serial_state[0])\n return serials",
"def get_devices():\n devices, errors = [], []\n\n for path in hookenv.action_get('devices').split(' '):\n path = path.strip()\n if not os.path.isabs(path):\n errors.append('{}: Not absolute path.'.format(path))\n elif not os.path.exists(path):\n errors.append('{}: Device does not exist.'.format(path))\n else:\n devices.append(path)\n\n if errors:\n raise ZapDiskError(\", \".join(errors))\n\n return devices",
"def GetDeviceSerials(self):\n return self._device_serial_index.keys()",
"def get_devices(adb=DEFAULT_ADB):\n # Check that adb is running\n Device.__start_adb(adb)\n # Split by newline and remove first line (\"List of devices attached\")\n # TODO: surround with try/except?\n devices = subprocess.check_output(\n [adb, \"devices\", \"-l\"]).decode().split('\\n')[1:]\n tmp = {}\n for dev in devices:\n if dev:\n tmp[dev.split()[0]] = dev\n return tmp",
"def get_devices():\n devices = []\n for device_id in range(pm.lib.Pm_CountDevices()):\n devices.append(DeviceInfo(device_id))\n\n return devices",
"def get_devices(self):\n\n md_configstore = os.path.join(\n os.environ['VOLTTRON_HOME'],\n \"configuration_store/platform.driver.store\"\n )\n\n if not os.path.exists(md_configstore):\n _log.debug(\"No master driver currently on this platform.\")\n return {}\n\n statinfo = os.stat(md_configstore)\n\n if self._master_driver_stat_time is None or \\\n self._master_driver_stat_time != statinfo.st_mtime:\n self._master_driver_stat_time = statinfo.st_mtime\n\n # else no change in the md file and we have the same stat time.\n else:\n keys = list(self._devices.keys())\n\n for k in keys:\n new_key = self.get_renamed_topic(k)\n if new_key != k:\n self._devices[new_key] = self._devices[k]\n del self._devices[k]\n\n return self._devices\n\n _log.debug('Getting devices')\n config_list = self.vip.rpc.call(CONFIGURATION_STORE,\n 'manage_list_configs',\n 'platform.driver').get(timeout=5)\n\n _log.debug('Config list is: {}'.format(config_list))\n devices = defaultdict(dict)\n\n for cfg_name in config_list:\n # Skip as we are only looking to do devices in this call.\n if not cfg_name.startswith('devices/'):\n continue\n\n device_config = self.vip.rpc.call('config.store', 'manage_get',\n 'platform.driver',\n cfg_name,\n raw=False).get(timeout=5)\n _log.debug('DEVICE CONFIG IS: {}'.format(device_config))\n\n reg_cfg_name = device_config.get(\n 'registry_config')[len('config://'):]\n _log.debug('Reading registry_config file {}'.format(\n reg_cfg_name\n ))\n registry_config = self.vip.rpc.call('config.store',\n 'manage_get', 'platform.driver',\n reg_cfg_name,\n raw=False).get(timeout=5)\n _log.debug('Registry Config: {}'.format(registry_config))\n\n points = []\n for pnt in registry_config:\n points.append(pnt['Volttron Point Name'])\n\n devices[cfg_name]['points'] = points\n\n return devices",
"def device_info(self) -> Dict[str, any]:\n return {\n \"identifiers\": {(DOMAIN, self._config[CONF_SERIAL])},\n \"name\": self._config[CONF_NAME],\n \"manufacturer\": \"Bosch\",\n }",
"def get_persistent_boot_device(self):\n system = self._get_host_details()\n try:\n # Return boot device if it is persistent.\n if system['Boot']['BootSourceOverrideEnabled'] == 'Continuous':\n device = system['Boot']['BootSourceOverrideTarget']\n if device in DEVICE_RIS_TO_COMMON:\n return DEVICE_RIS_TO_COMMON[device]\n return device\n except KeyError as e:\n msg = \"get_persistent_boot_device failed with the KeyError:%s\"\n raise exception.IloError((msg) % e)\n\n # Check if we are in BIOS boot mode.\n # There is no resource to fetch boot device order for BIOS boot mode\n if not self._is_boot_mode_uefi():\n return None\n\n # Get persistent boot device order for UEFI\n boot_sources, boot_devices = self._get_persistent_boot_devices()\n\n boot_string = \"\"\n try:\n for source in boot_sources:\n if (source[\"StructuredBootString\"] == boot_devices[0]):\n boot_string = source[\"BootString\"]\n break\n except KeyError as e:\n msg = \"get_persistent_boot_device failed with the KeyError:%s\"\n raise exception.IloError((msg) % e)\n\n if 'HP iLO Virtual USB CD' in boot_string:\n return 'CDROM'\n\n elif ('NIC' in boot_string or\n 'PXE' in boot_string or\n \"iSCSI\" in boot_string):\n return 'NETWORK'\n\n elif common.isDisk(boot_string):\n return 'HDD'\n\n else:\n return None",
"def devices_dict(self):\n return self.devices.dict",
"def devices(self):\n return self._recordings.keys()",
"def devices(self):\n return self.enumerate_devices()",
"def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self._uuid)},\n \"name\": self._device.device_data[self._uuid]['name'],\n \"manufacturer\": \"Nest Labs\",\n \"model\": self._device.device_data[self._uuid]['model'],\n }",
"def listDevices(self):\n count = 0\n for device in self:\n count += 1\n printLog(\"Device \" + str(count) + \": '%s %s (%s, %s, %s)'\" % (\n device.make, device.model, device.deviceId, device.androidVersion, device.operator))\n if device.idle:\n printLog(\"[Idle]\")\n else:\n printLog(\"[Busy]\")",
"def collect_existing_mounts():\n result = {}\n for mount in sh.mount().stdout.decode('utf-8').splitlines():\n tokens = mount.split()\n if tokens[1] == 'on' and tokens[0].startswith('/dev/'):\n device = tokens[0][5:]\n result[tokens[2]] = device\n return result",
"def list_devices():\n return _lib.SeaTeaseAPI().list_devices()",
"def get_devices():\n global managed_objects\n global devices_by_adr\n \n devices_by_adr = {}\n \n r = re.compile(\"\\/org\\/bluez\\/hci\\d*\\/dev\\_(.*)\")\n # e.g., match a string like this:\n # /org/bluez/hci0/dev_58_C9_35_2F_A1_EF\n \n for key, value in managed_objects.items():\n # print(\"key=\", key)\n m = r.match(key)\n if m is not None:\n dev_str = m.group(1) # we have a device string!\n # print(\"dev_str=\", dev_str)\n # let's flatten that dict a bit\n devices_by_adr[dev_str] = value[\"org.bluez.Device1\"]",
"def get_devices(self):\n return self.api_request('GET', self.url + '/device', {})",
"def device_info(self):\n return {\n \"identifiers\": {(DOMAIN, self.unique_id)},\n \"name\": self.name,\n \"manufacturer\": \"Brightech\",\n }",
"def get_device_map():\n ret = []\n vlist = subprocess.check_output(['ceph-volume', 'lvm', 'list',\n '--format=json'])\n for osd_id, data in json.loads(vlist.decode('utf8')).items():\n osd_id = normalize_osd_id(osd_id)\n for elem in data:\n for device in elem['devices']:\n ret.append({'id': osd_id, 'path': device})\n return ret",
"def getDevices(i):\n devices = Account['KTFLR'].devices('monpressprod')\n device = devices[i]\n return device"
] | [
"0.65933275",
"0.64047486",
"0.63290364",
"0.62824804",
"0.627704",
"0.6253434",
"0.6251941",
"0.62083673",
"0.62029696",
"0.6141751",
"0.61345845",
"0.6098163",
"0.60664326",
"0.60396963",
"0.6038037",
"0.6015366",
"0.6013724",
"0.6004768",
"0.6000633",
"0.5989116",
"0.5956543",
"0.59317744",
"0.5927542",
"0.5926935",
"0.59190327",
"0.5915958",
"0.5910873",
"0.5909324",
"0.59072244",
"0.59046733"
] | 0.78109515 | 0 |
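The two values returned above are usually correlated by matching 'StructuredBootString' entries, as the get_persistent_boot_device negative illustrates. A small, self-contained sketch with made-up sample data:

def first_boot_string(boot_sources, boot_order):
    # Find the human-readable BootString for the highest-priority boot entry.
    for source in boot_sources:
        if source.get("StructuredBootString") == boot_order[0]:
            return source.get("BootString")
    return None

# Sample data shaped like the BootSources/PersistentBootConfigOrder payloads.
sources = [{"StructuredBootString": "NIC.LOM.1.1", "BootString": "Embedded LOM 1 Port 1"},
           {"StructuredBootString": "HD.Emb.1.2", "BootString": "Embedded SATA Port 2"}]
order = ["HD.Emb.1.2", "NIC.LOM.1.1"]
assert first_boot_string(sources, order) == "Embedded SATA Port 2"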
Retrieves the current setting for the one-time boot. | def get_one_time_boot(self):
system = self._get_host_details()
try:
if system['Boot']['BootSourceOverrideEnabled'] == 'Once':
device = system['Boot']['BootSourceOverrideTarget']
if device in DEVICE_RIS_TO_COMMON:
return DEVICE_RIS_TO_COMMON[device]
return device
else:
                # value returned by RIBCL if the one-time boot setting is absent
return 'Normal'
except KeyError as e:
msg = "get_one_time_boot failed with the KeyError:%s"
raise exception.IloError((msg) % e) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getCurrentSetting(self):\n return {}",
"def get_bootvar(self):\n module = 'bootimage/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n bootdefault = response.json()['bootimage']['oper']['hd-default']\n print(self.device + ' The device is set to boot from: ' + bootdefault + ' in the future')\n return bootdefault",
"def initial(self):\n from setman import settings\n return getattr(settings, self.name, self.default)",
"def settings():\n return _get_settings()[1]",
"def myCurrentSetting(self):\n paramDict = self.getCurrentSetting()\n return paramDict",
"def GetSettingInformation(self):\n if self.cur_uid is None:\n return\n self._get_device_hours()",
"def bootstrap_setting(value):\n return get_bootstrap_setting(value)",
"def get_setting(self, key, default=NOT_SET):\n if key in self.settings:\n return self.settings[key]\n app_key = 'tangled.app.' + key\n if app_key in self.settings:\n return self.settings[app_key]\n if default is NOT_SET:\n raise KeyError(\"'{}' not present in settings\".format(key))\n return default",
"def get_setting(self, id):\n return __settings__.getSetting(id)",
"def get_system_value(name: str):\n return Config.objects.first().__dict__[name]",
"def current_settings(self):\n return {\n 'power_state': self.power_state,\n 'brightness': self.brightness,\n }",
"def get_setting(self, setting):\n return self.do_rpc(\"get_setting\", key=key)",
"def get(self):\n self.value = os.getenv(self.name, self.default)\n return self.value",
"def __returnCurrentSettingLocal__(self):\n return self.dmdParams",
"def getGlobalSetting(self, setting):\n self._cacheConfig()\n settingVal = None\n try:\n settingVal = self._fileCache[setting]\n except KeyError:\n # if no global setting exists, try finding the value as a daily setting\n # (if all days are the same it'll be a global, but otherwise we'll just give today's setting)\n settingVal = self.getDailySetting(getDayFromNum(datetime.datetime.today().weekday()), setting)\n\n return settingVal",
"def getSystemAwake(self):\n print 'start of getSystemAwak() system_awake = {0}'.format(self.system_awake) # TESTING ++++++++++++++++\n try:\n self.db = shelve.open(os.path.join(self.xlocal, 'Launch Manager Utils\\\\launch.data'))\n if self.db['system_awake'] == False:\n print 'start of if true - getSystemAwak() system_awake = {0}'.format(self.system_awake) # TESTING ++++++++++++++++\n self.system_awake = self.db['system_awake']\n self.db.close()\n else:\n self.system_awake = True\n self.db['system_awake'] = self.system_awake\n self.db.close()\n \n print 'End of getSystemAwak() system_awake = {0}'.format(self.system_awake) # TESTING ++++++++++++++++\n \n except Exception, e:\n self.log_file.logEntry('{0}\\nUnable to load previous system_awake value, setting value to True'.format(e))\n self.system_awake = True",
"def get_settings(self):\n return self.settings",
"def config(self):\n return self[CONFIG_KEY]",
"def get_config():\n return CONFIG",
"def get_config():\n return _CONFIG",
"def __returnCurrentSettingLocal__(self):\n return {}",
"def default_value(self):\n return self.__class__.get_setting_default(self.key, **self.get_kwargs())",
"def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })",
"def get_setting(key):\n try:\n from main import flask_app\n return flask_app.config[key]\n except:\n environment = get_environment()\n #Load settings from the corresponding class\n if environment == Config.ENV_PRODUCTION:\n obj = ProductionConfig()\n else:\n obj = TestingConfig()\n return getattr(obj, key)",
"def get_setting_value(self, key, default = None):\n \n if not \"settings\" in self.configuration or not key in self.configuration['settings']:\n return default\n \n return self.configuration['settings'][key]",
"def current_option(self) -> str | None:\n # If the translation key is \"zone_sleep\", we need to translate\n # the value to make it compatible with Home Assistant\n if (\n value := self.capability.current\n ) is not None and self.translation_key == \"zone_sleep\":\n return ZONE_SLEEP_STATE_MAPPING[value]\n\n return value",
"def _get_local_preference(self):\n return self.__local_preference",
"def _get_conf(self):\n self.press_conf = self.sysconf['PressureRegulators']\n return self.press_conf['PressureRegulator%d' % self.id_]",
"def GetCurrent():\n global ENV\n return ENV[threading.current_thread().ident]",
"def _get_config(self):\n return self.__config"
] | [
"0.69801486",
"0.68082726",
"0.6772974",
"0.67710704",
"0.6631858",
"0.6596899",
"0.65748394",
"0.63340545",
"0.6281833",
"0.6263905",
"0.6256233",
"0.6233154",
"0.6194374",
"0.614808",
"0.61427236",
"0.6128543",
"0.61068916",
"0.6082622",
"0.60769004",
"0.6052895",
"0.6045621",
"0.60309285",
"0.6005268",
"0.6004204",
"0.59984726",
"0.5994063",
"0.5988337",
"0.59701717",
"0.59500754",
"0.59436923"
] | 0.6882444 | 1 |
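A standalone sketch of the same decision logic over a Redfish-style 'Boot' dict. The mapping below is an assumed subset of DEVICE_RIS_TO_COMMON, not the driver's full table:

DEVICE_RIS_TO_COMMON = {"Cd": "CDROM", "Pxe": "NETWORK", "Hdd": "HDD"}  # assumed subset

def one_time_boot(system: dict) -> str:
    boot = system["Boot"]
    if boot["BootSourceOverrideEnabled"] == "Once":
        device = boot["BootSourceOverrideTarget"]
        return DEVICE_RIS_TO_COMMON.get(device, device)
    return "Normal"

assert one_time_boot({"Boot": {"BootSourceOverrideEnabled": "Once",
                               "BootSourceOverrideTarget": "Cd"}}) == "CDROM"
assert one_time_boot({"Boot": {"BootSourceOverrideEnabled": "Disabled",
                               "BootSourceOverrideTarget": "None"}}) == "Normal"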
Gets the firmware update service URI. | def _get_firmware_update_service_resource(self):
manager, uri = self._get_ilo_details()
try:
fw_uri = manager['Oem']['Hp']['links']['UpdateService']['href']
except KeyError:
msg = ("Firmware Update Service resource not found.")
raise exception.IloCommandNotSupportedError(msg)
return fw_uri | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_service_url():\n return get_config_handler().get_service_url()",
"def _get_uri(plex_server):\n return plex_server.url(\n \"/:/websockets/notifications\", includeToken=True\n ).replace(\"http\", \"ws\")",
"def get_http_boot_uri(self):\n try:\n sushy_system = self._get_sushy_system()\n http_boot_uri = sushy_system.http_boot_uri.httpbooturi\n except sushy.exceptions.SushyError as e:\n msg = (self._('Not able to find HTTP Boot URI. Error: '\n '%(error)s') %\n {'error': str(e)})\n LOG.debug(msg)\n raise exception.SDFlexError(msg)\n return http_boot_uri",
"def _uri(helper):\n return '/'.join((\n helper.context_meta['server_uri'],\n 'servicesNS',\n 'nobody',\n 'Splunk_TA_paloalto',\n 'storage',\n 'collections',\n 'data',\n 'minemeldfeeds'))",
"def EndpointURI(self):\n return '/'.join(str(x) for x in [self.base_endpoint,self.match,self.resource] if x)",
"def __get_url_addr(self):\n request = urlopen(self.url)\n version = request.readline()\n request.close()\n request = urlparse.urlparse(self.url)\n unparsed_url = urlparse.urlunparse((request.scheme, request.netloc,\n request.path, '', '', ''))\n updated_url = urlparse.urljoin(unparsed_url, version + '/' +\n self.file_name)\n return updated_url",
"def service_endpoint(self) -> str:\n return pulumi.get(self, \"service_endpoint\")",
"def get_uri(self):\n if self._uri is None:\n self._uri = \"{0}{1}/{2}\".format(\n self.session.resource_prefix,\n self.base_uri,\n self.ip_or_ifname_or_group_name,\n )\n\n return self._uri",
"def _get_api_endpoint():\n try:\n return get_service_endpoint(\"apiext\").strip(\"/\")\n except:\n log.warn(\n \"Could not find valid apiext endpoint for links so will use policy engine endpoint instead\"\n )\n try:\n return get_service_endpoint(\"policy_engine\").strip(\"/\")\n except:\n log.warn(\n \"No policy engine endpoint found either, using default but invalid url\"\n )\n return \"http://<valid endpoint not found>\"",
"def service_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"service_url\")",
"def get_overpass_uri() -> str:\n Config.__get()\n assert Config.__config is not None\n return Config.__config.get(\"wsgi\", \"overpass_uri\", fallback=\"https://overpass-api.de\").strip()",
"def endpoint_uri(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"endpoint_uri\")",
"def uri_for_service(self, region, service_id, base_uri):\n return str(URLPath.fromString(base_uri)\n .child(\"service\").child(region).child(service_id).child(\"\"))",
"def uri(cls):\n return f'{cls.app_label}.{cls.name}'",
"def uri(self) -> Optional[str]:\n return pulumi.get(self, \"uri\")",
"def get_uri(self):\n return self.url",
"def _get_webservice_url(self, ws_key):\n if self._webservices.get(ws_key) is None:\n raise PyiCloudServiceNotActivatedException(\n \"Webservice not available\", ws_key\n )\n return self._webservices[ws_key][\"url\"]",
"def uri(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uri\")",
"def get_latest_version_link(self):\n return self.get_latest_version().dbgap_link",
"def get_wsdl_url(self):\n return self.mycam.devicemgmt.GetWsdlUrl()",
"def service_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_url\")",
"def service_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"service_url\")",
"def application_service_path(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_service_path\")",
"def api_endpoint(self, url):\n if urlparse(url).scheme in [\"http\", \"https\"]:\n return url # url is already complete\n return urljoin(f\"{RESOURCE}/{API_VERSION}/\", url.lstrip(\"/\"))",
"def uri(self) -> str:\n return self._uri",
"def get_update_url(self, resource_obj=None, **kwargs):\n\n full_url = getattr(resource_obj, 'full_url', None)\n if full_url:\n return full_url\n\n try:\n update_url = self._generate_url(\n url_type='update', resource_obj=resource_obj, **kwargs\n )\n except ValueError:\n update_url = None\n\n return update_url",
"def getURI(self):\n return _libsbml.SBasePlugin_getURI(self)",
"def getEndpoint(self):\n port = \"\"\n endpoint = \"\"\n keyConfig = self.getKeyConfig()\n\n if \"port\" in keyConfig:\n port = \":\" + keyConfig[\"port\"]\n elif self._data[\"port\"] != self.PORT:\n port = \":\" + self._data[\"port\"]\n\n if \"endpoint\" in keyConfig:\n endpoint = keyConfig[\"endpoint\"]\n else:\n endpoint = self._data[\"endpoint\"]\n\n return \"https://%s%s/%s/\" % (endpoint, port, self._data[\"api_version\"])",
"def _get_base_url(self):\n return 'https://'+self.get_address_and_port_string()",
"def endpoint_uri(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"endpoint_uri\")"
] | [
"0.65430695",
"0.64294636",
"0.64230037",
"0.62257314",
"0.6150541",
"0.60008526",
"0.598782",
"0.59735656",
"0.593328",
"0.59007615",
"0.5866994",
"0.58536565",
"0.5798969",
"0.57935137",
"0.57414603",
"0.5725773",
"0.57218593",
"0.5717393",
"0.569106",
"0.5676379",
"0.5646494",
"0.5646494",
"0.5617997",
"0.5610391",
"0.5605624",
"0.55928123",
"0.55890864",
"0.5585353",
"0.55811614",
"0.55675423"
] | 0.80128765 | 0 |
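The lookup above walks several optional levels of the manager resource. A compact, reusable variant for illustration; the path segments are the ones the method uses, while the helper name is made up:

from functools import reduce

def nested_get(data: dict, *keys):
    # Walk nested dicts, returning None as soon as a key is missing.
    return reduce(lambda d, k: d.get(k) if isinstance(d, dict) else None, keys, data)

manager = {"Oem": {"Hp": {"links": {"UpdateService": {"href": "/rest/v1/Managers/1/UpdateService"}}}}}
assert nested_get(manager, "Oem", "Hp", "links", "UpdateService", "href") \
    == "/rest/v1/Managers/1/UpdateService"
assert nested_get({}, "Oem", "Hp") is None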
Get the progress of the firmware update. | def get_firmware_update_progress(self):
try:
fw_update_uri = self._get_firmware_update_service_resource()
except exception.IloError as e:
LOG.debug(self._('Progress of firmware update not known: %s'),
str(e))
return "UNKNOWN", "UNKNOWN"
# perform the GET
status, headers, response = self._rest_get(fw_update_uri)
if status != 200:
msg = self._get_extended_error(response)
raise exception.IloError(msg)
fw_update_state = response.get('State')
fw_update_progress_percent = response.get('ProgressPercent')
LOG.debug(self._('Flashing firmware file ... in progress %d%%'),
fw_update_progress_percent)
return fw_update_state, fw_update_progress_percent | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_firmware_update_status(self):\n\n response = self.execute_command(CMD_GET_FIRMWARE_UPDATE_STATUS)[0]\n inprogress = (response & 0x80) == 0x80\n return {\n \"inprogress\": inprogress,\n \"error\": response & 0x7f,\n }",
"def GetProgress(self):\n return self.new_progress",
"def UpgradeProgress(self):\n if self.force_auto_sync:\n self.get('UpgradeProgress')\n return self._UpgradeProgress",
"def getProgress(self):",
"def progress(self):\n return self.runProgress",
"def progress(self):\n return self.runProgress",
"def progress(self):\n return self.progressValue",
"def get_progress(self):\n return self.cloudserver.progress",
"def progress(self):\n if self.dynamic:\n self._update_db_obj()\n return self._db_obj.progress",
"def progress(self) -> float:\n return self._progress",
"def progress(self) -> float:\n return self._progress",
"def progress(self) -> float:\n return self._progress",
"def progress(self) -> float:\n return self._progress",
"def getProgress(self):\n return self._progress",
"def get_progress(self):\n\t\treturn call_sdk_function('PrlJob_GetProgress', self.handle)",
"def progress(self):\n try:\n return 100.0 * (self.fields['sizeWhenDone'] - self.fields['leftUntilDone']) / float(self.fields['sizeWhenDone'])\n except ZeroDivisionError:\n return 0.0",
"def sound_install_progress(self):\n return SoundInstallStatus(self.send(\"get_sound_progress\")[0])",
"def percent_updated(self):\n return self.percent_complete - self.previous_percent_complete",
"def build_progress(self) -> Union[int, float]:\n return self.proto.build_progress",
"def get_progress(self):\n ret = self.state + \"\\n\"\n self.reset_progress()\n return ret",
"def status(self):\n\t\tstatus = self.thread.status()\n#\t\tprint_array(status)\n\t\tmessage = [\"------ RSYNC PROGRESS ------ \"]\n\t\tif self.log_message:\n\t\t\tmessage.append(self.log_message)\n\t\tmessage.append(\"Current file: %s\" % status['current_file'])\n\t\tmessage.append(\"\\tBytes Copied: %s\" % status['bytes_copied'])\n\t\tmessage.append(\"\\tPercent Done: %s\" % status['percent_done'])\n\t\tmessage.append(\"\\tTransfer Rate: %s\" % status['transfer_rate'])\n\t\tmessage.append(\"\\tTime Remaining: %s\" % status['est_remain'])\n\t\tmessage.append(\"\\tTransfer Number: %s\" % status['xfer_num'])\n\t\tmessage.append(\"\\tTransfers Remaining: %s\" % status['xfer_remain'])\n\t\tmessage.append(\"\\tTransfers Total: %s\" % status['xfer_total'])\n\t\tmessage.append(\"\\t----------------------------------\")\n\t\ttry:\n\t\t\toverall_percent = int(round((int(status['xfer_num'])*1.0)/int(status['xfer_total']),2)*100)\n\t\texcept: overall_percent = 0\n\t\tmessage.append(\"\\tTotal Rsync done: %s%%\\n\" % overall_percent)\n\t\tp = open(self.progress_file,'w+',0)\n\t\tfor line in message:\n\t\t\t#print line\n\t\t\tp.write(\"%s\\n\" % line)\n\t\tp.flush()\n\t\tp.close()",
"def get_progress(self):\r\n return None",
"def get_progress(self, pr, id):\n\t\treturn round((self.handler.file_progress()[id] / pr.length) * 100, )",
"def percentage_update(self):\n\n self.event_update()\n return self.percentage",
"def progress_bar_update() -> str:\n # As we get updates only when the progress bar is updated we need to fix the 'duration' and 'time remaining' parts\n # (time never stops)\n now = datetime.now()\n result = []\n for pb_id in sorted(_DASHBOARD_TQDM_DICT.keys()):\n progress = _DASHBOARD_TQDM_DICT.get(pb_id)\n if progress['success'] and progress['n'] != progress['total']:\n progress['duration'] = str(now - progress['started_raw']).rsplit('.', 1)[0]\n progress['remaining'] = (str(progress['finished_raw'] - now).rsplit('.', 1)[0]\n if progress['finished_raw'] is not None and progress['finished_raw'] > now\n else '-')\n result.append(progress)\n\n return jsonify(result=result)",
"def update_progress(self, value=None):\n if self.main_app is not None:\n if value is not None:\n self.main_app.update_progress(value)\n else:\n if self.total_files != 0:\n self.main_app.update_progress((self.current_file / self.total_files) * 100)",
"def get_status(self):\n return str(self.percentage) + \"%\", self.downloaded, self.speed",
"def progress(self):\n percent = self._infos.get(BulkInsertState.IMPORT_PROGRESS, \"0\")\n return int(percent)",
"def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)",
"def update_progress(self):\n report = self.build_progress_report()\n self.conduit.set_progress(report)"
] | [
"0.71777356",
"0.7156413",
"0.69528806",
"0.69204676",
"0.6803651",
"0.6803651",
"0.6798143",
"0.67429936",
"0.67102855",
"0.67044973",
"0.67044973",
"0.67044973",
"0.67044973",
"0.66923326",
"0.6637259",
"0.6631922",
"0.662532",
"0.6559995",
"0.65389353",
"0.6411595",
"0.6337092",
"0.6326037",
"0.630838",
"0.62910026",
"0.62078714",
"0.61872536",
"0.6162816",
"0.61385465",
"0.608866",
"0.608866"
] | 0.8577004 | 0 |
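Because the method returns a (state, percent) pair, callers typically poll it until a terminal state. A minimal polling sketch; the terminal state names here are assumptions, not values documented above:

import time

def wait_for_firmware_update(get_progress, interval=5, timeout=600):
    # get_progress() -> (state, percent), matching the method above.
    deadline = time.time() + timeout
    while time.time() < deadline:
        state, percent = get_progress()
        print("firmware update: %s (%s%%)" % (state, percent))
        if state in ("COMPLETED", "ERROR", "UNKNOWN"):  # assumed terminal states
            return state
        time.sleep(interval)
    raise TimeoutError("firmware update did not finish within %ss" % timeout)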
Retrieves whether the server is TPM-capable. | def _get_tpm_capability(self):
tpm_values = {"NotPresent": False,
"PresentDisabled": True,
"PresentEnabled": True}
try:
tpm_state = self._get_bios_setting('TpmState')
except exception.IloCommandNotSupportedError:
tpm_state = "NotPresent"
tpm_result = tpm_values[tpm_state]
return tpm_result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_available():",
"def is_vtd_supported(self):\n\t\treturn bool(call_sdk_function('PrlSrvCfg_IsVtdSupported', self.handle))",
"def evaluate_hardware_support(self):\n return hardware.HardwareSupport.SERVICE_PROVIDER",
"def is_available(self) -> bool:\n return (\n len(self._gpu_ids) > 1\n and \"TORCHELASTIC_RUN_ID\"\n not in os.environ # If otx is executed by torchrun, then otx multi gpu interface is disabled.\n )",
"def is_ctu_capable():\n\n context = package_context.get_context()\n ctu_func_map_cmd = context.ctu_func_map_cmd\n try:\n version = subprocess.check_output([ctu_func_map_cmd, '-version'])\n except (subprocess.CalledProcessError, OSError):\n version = 'ERROR'\n return version != 'ERROR'",
"def available_on_system(cls):\n return (cls.reason_to_be_disabled() is None)",
"def is_vserver_kernel():\n\n kinfo = commands.getoutput('/bin/uname -a').split()[2]\n return '-vs' in kinfo",
"def available_t5():\n return _t5_availability",
"def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]",
"def check_hyperv() -> bool:\n try:\n out = subprocess.check_output(\n ['DISM', '/Online', '/Get-FeatureInfo', '/FeatureName:Microsoft-Hyper-V']\n )\n except subprocess.CalledProcessError:\n return False\n\n if 'State : Disabled' in out.decode():\n return False\n\n return True",
"def available(self) -> bool:\n return self._tm_client.api.available",
"def sstcp_enabled():\n return common.POWER_CAP in SYSTEM_CAPS",
"def is_system(self) -> bool:",
"def detect_available():\n global _CUDA_AVAILABLE\n if _CUDA_AVAILABLE is not None: return _CUDA_AVAILABLE\n _CUDA_AVAILABLE = shell.run('{} -c \"import torch;print(torch.cuda.is_available())\"'.format(sys.executable)).strip('\\n') == 'True'\n return _CUDA_AVAILABLE",
"def hasaccelerator():\n\n return torch.cuda.is_available() or torch.backends.mps.is_available() or bool(Models.finddevice())",
"def is_available(cls):\n\n try:\n proc = subprocess.Popen(\n ['systemctl', 'status', 'NetworkManager'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n proc.communicate()\n return proc.returncode == 0\n except OSError:\n return False",
"def is_supported(self) -> bool:\n\n # TODO logging ?\n # TODO ICMP error if ttl is zero\n return self._version == 4 and self._ihl >= 5 and self._ttl != 0",
"def _get_cpu_virtualization(self):\n try:\n cpu_vt = self._get_bios_setting('ProcVirtualization')\n except exception.IloCommandNotSupportedError:\n return False\n if cpu_vt == 'Enabled':\n vt_status = True\n else:\n vt_status = False\n return vt_status",
"def IsAvailable():\n return settings.user.ui.Get('opportunities_showTemp', False)",
"def is_available() -> bool:\n # This function never throws and returns 0 if driver is missing or can't\n # be initialized\n return device_count() > 0",
"def is_available() -> bool:\n return HAVE_RLE",
"def is_on(self):\n return self._client.get_power()",
"def enable_tpu(self) -> bool:\n return pulumi.get(self, \"enable_tpu\")",
"def is_host_on(self):\n status = False\n cmd = \"/usr/local/bin/wedge_power.sh status\"\n data = run_shell_cmd(cmd)\n Logger.info(\"[FSCD Testing] Executing cmd= [{}]\".format(cmd))\n Logger.info(\"[FSCD Testing] Received data= [{}]\".format(data))\n if \"on\" in data:\n status = True\n Logger.info(\"[FSCD Testing] userver power status {}\".format(status))\n return status",
"def is_nvme(self):\n if self.server_params[-1].bdev_class.value == \"nvme\":\n return True\n return False",
"def get_server_capabilities(self):\n capabilities = {}\n system = self._get_host_details()\n capabilities['server_model'] = system['Model']\n rom_firmware_version = (\n system['Oem']['Hp']['Bios']['Current']['VersionString'])\n capabilities['rom_firmware_version'] = rom_firmware_version\n capabilities.update(self._get_ilo_firmware_version())\n capabilities.update(self._get_number_of_gpu_devices_connected())\n if self._get_tpm_capability():\n capabilities['trusted_boot'] = 'true'\n\n if self._get_cpu_virtualization():\n capabilities['cpu_vt'] = 'true'\n if self._get_nvdimm_n_status():\n capabilities['nvdimm_n'] = 'true'\n try:\n self.get_secure_boot_mode()\n capabilities['secure_boot'] = 'true'\n except exception.IloCommandNotSupportedError:\n # If an error is raised dont populate the capability\n # secure_boot\n pass\n if self._is_sriov_enabled():\n capabilities['sriov_enabled'] = 'true'\n return capabilities",
"def get_capabilities(disk):\n\n #TODO\n return \"Unknown\"",
"def available(self) -> bool:\n return pulumi.get(self, \"available\")",
"def is_system(self) -> undefined.UndefinedOr[bool]:",
"def otp_is_verified(request):\n auth = JSONWebTokenAuthentication()\n jwt_value = auth.get_jwt_value(request)\n if jwt_value is None:\n return False\n\n payload = jwt_decode_handler(jwt_value)\n persistent_id = payload.get('otp_device_id')\n\n if persistent_id:\n device = Device.from_persistent_id(persistent_id)\n if device is not None and device.user_id != request.user.id:\n return False\n # Valid device in JWT\n return True\n return False"
] | [
"0.63089687",
"0.6192992",
"0.6138347",
"0.5952284",
"0.59215266",
"0.58764535",
"0.58491164",
"0.58059984",
"0.5796468",
"0.5784329",
"0.57265705",
"0.56829214",
"0.5666347",
"0.56659436",
"0.56633973",
"0.5661813",
"0.56546235",
"0.56484747",
"0.5633898",
"0.55766743",
"0.5540979",
"0.55345356",
"0.5526762",
"0.5501204",
"0.5492155",
"0.545134",
"0.5440849",
"0.5439995",
"0.5423846",
"0.54234016"
] | 0.7238231 | 0 |
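One subtlety worth noting in the mapping above: a present-but-disabled TPM still counts as capable. A two-line check, purely for illustration:

TPM_VALUES = {"NotPresent": False, "PresentDisabled": True, "PresentEnabled": True}

assert TPM_VALUES["PresentDisabled"] is True   # disabled hardware is still present
assert TPM_VALUES["NotPresent"] is False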
Get CPU virtualization status. | def _get_cpu_virtualization(self):
try:
cpu_vt = self._get_bios_setting('ProcVirtualization')
except exception.IloCommandNotSupportedError:
return False
if cpu_vt == 'Enabled':
vt_status = True
else:
vt_status = False
return vt_status | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def status(self):\n if self.qemu.is_running():\n status = 0\n self.log.info(\"vm-status\", result=\"online\")\n for device in list(self.qemu.block_info().values()):\n self.log.info(\n \"disk-throttle\",\n device=device[\"device\"],\n iops=device[\"inserted\"][\"iops\"],\n )\n else:\n status = 1\n self.log.info(\"vm-status\", result=\"offline\")\n for volume in self.ceph.volumes:\n locker = volume.lock_status()\n self.log.info(\"rbd-status\", volume=volume.fullname, locker=locker)\n consul = locate_live_service(self.consul, \"qemu-\" + self.name)\n if consul:\n self.log.info(\n \"consul\", service=consul[\"Service\"], address=consul[\"Address\"]\n )\n else:\n self.log.info(\"consul\", service=\"<not registered>\")\n return status",
"def get_cpu_hvt(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetCpuHvt', self.handle)",
"def get_cpu_count(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuCount', self.handle)",
"def getVirtualStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/qemu/%s/status/current' % (node,vmid),None)\n return data",
"def get_cpu_mode(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuMode', self.handle)",
"def VMStatus(self):\n try:\n status = self.vmInstance.get_status()\n LOGGER.info('Current status of virtual machine \"{}\": {}'.format(VM_NAME, status))\n\n except Exception as e:\n status = None\n LOGGER.debug(e)\n LOGGER.error(traceback.format_exc())\n LOGGER.error('An error occured while getting status of virtual machine \"{}\"!'.format(VM_NAME))\n\n return status",
"def get_cpu_accel_level(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuAccelLevel', self.handle)",
"def get_cpu_count(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetCpuCount', self.handle)",
"def get_cpu_mode(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetCpuMode', self.handle)",
"def cpu(self) -> int:\n return pulumi.get(self, \"cpu\")",
"def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")",
"def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")",
"def get_cpu(self):\n pass",
"def checkCpu(self):\n cpu = self.getCpu()\n err_msg = []\n task_result = device_status = 0\n\n if cpu is None:\n err_msg.append('Get CPU info failed')\n task_result = device_status = 1\n else:\n # 以后可扩展告警条件\n pass\n return cpu, err_msg, task_result, device_status",
"def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-able\n self.vm.wait_ready()\n status = self.vm.run_command(\"ctool status\", indent=0, prefix='')\n return status.strip()",
"def vcpus(self):\n return self._vcpus",
"def cpu(self):\r\n return self._cpu",
"def get_cpu_info():\n try:\n cpu_info = subprocess.check_output('lscpu')\n return cpu_info\n except OSError:\n return None",
"def get_cpu_units(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuUnits', self.handle)",
"def get_cpu_limit(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuLimit', self.handle)",
"def cpu():\n sin = psutil.cpu_percent()\n return round(sin / 100, 3)",
"def _compute_status(self, instance, zone):\n if self.compute_service is None:\n logging.warning('Service unavailable: unable to start GCE VM: %s (%s)',\n instance, zone)\n return\n\n info = self.compute_service.instances().get(\n project=app_identity.get_application_id(),\n instance=instance,\n zone=zone).execute()\n return info[COMPUTE_STATUS]",
"def get_cpu_model(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetCpuModel', self.handle)",
"def getcpuspeed():\n f = os.popen(\"/opt/vc/bin/vcgencmd get_config arm_freq\")\n cpu = f.read()\n return cpu",
"def get_cpu_usage(self):\n\t\treturn call_sdk_function('PrlStatCpu_GetCpuUsage', self.handle)",
"def getCpu(self):\n # todo: 完善不同设备获取信息的方法\n cpu = None\n if self.type in ['E', 'T', 'S', 'K', 'A', 'AX', 'W']:\n m = \"Current cpu utilization :\\s*([\\d\\.]+)%\"\n rt = re.search(m, self.dut.cli(\"show cpu\"))\n if rt:\n cpu = float(rt.groups()[0])\n return cpu",
"def cpu_count_logical():\n return cext.cpu_count_logical()",
"def getcpuusage(self):\n return ord(self.reg(0x11, write=1))",
"def cpuInfo(self, json, i3status_config):\n response = {'full_text': '', 'name': 'cpu_usage'}\n cpu_total, cpu_idle = self.data.cpu()\n used_cpu_percent = 1 - float(cpu_idle-self.cpu_idle)/float(cpu_total-self.cpu_total)\n self.cpu_total = cpu_total\n self.cpu_idle = cpu_idle\n\n \"\"\"\n if used_cpu_percent <= 40/100.0:\n response['color'] = i3status_config['color_good']\n elif used_cpu_percent <= 75/100.0:\n response['color'] = i3status_config['color_degraded']\n else:\n response['color'] = i3status_config['color_bad']\n \"\"\"\n response['color'] = \"#6c71c4\"\n #cpu temp\n CPUTEMP=False\n if CPUTEMP:\n cputemp=subprocess.check_output('sensors | grep \"CPU Temp\" | cut -f 2 -d \"+\" | cut -f 1 -d \" \"',shell=True)\n cputemp=cputemp[:-1].decode('utf-8')\n response['full_text'] = \" %.2f%%\" % (used_cpu_percent*100) +\" \"+cputemp\n else:\n \tresponse['full_text'] = \" %.2f%%\" % (used_cpu_percent*100)\n\n #cache the status for 10 seconds\n response['cached_until'] = time() + 10\n\n return (0, response)",
"def get_online():\n print( \"Online CPUs:\" + \"\".join( f\" {cpu}\" for cpu in _cpu.get_online_cpus() ) )"
] | [
"0.7207931",
"0.71718997",
"0.70512587",
"0.7005931",
"0.696061",
"0.67730594",
"0.66261756",
"0.66120964",
"0.6603125",
"0.65707415",
"0.655431",
"0.655431",
"0.65318954",
"0.6502956",
"0.6492939",
"0.647564",
"0.64564735",
"0.630386",
"0.62673694",
"0.62613034",
"0.6251442",
"0.624607",
"0.6228366",
"0.6196415",
"0.615268",
"0.6134088",
"0.61061525",
"0.610488",
"0.60846835",
"0.60351473"
] | 0.8119382 | 0 |
Get status of NVDIMM_N. | def _get_nvdimm_n_status(self):
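# 'NvDimmNMemFunctionality' is the BIOS key for NVDIMM-N support; an unsupported command is treated as disabled.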
try:
nvdimm_n_status = self._get_bios_setting('NvDimmNMemFunctionality')
if nvdimm_n_status == 'Enabled':
nvn_status = True
else:
nvn_status = False
except exception.IloCommandNotSupportedError:
nvn_status = False
return nvn_status | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getnumbarvar(self):\n numbarvar_ = ctypes.c_int32()\n res = __library__.MSK_XX_getnumbarvar(self.__nativep,ctypes.byref(numbarvar_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numbarvar_ = numbarvar_.value\n _numbarvar_return_value = numbarvar_\n return (_numbarvar_return_value)",
"def get_status(self):\n return self.o.read_register(self.dev_id, STATUS)",
"def node_status(self) -> Optional['outputs.CSIPowerMaxStatusNodeStatus']:\n return pulumi.get(self, \"node_status\")",
"def node_status(self) -> Optional['outputs.CSIVXFlexOSStatusNodeStatus']:\n return pulumi.get(self, \"node_status\")",
"def node_status(self) -> Optional['outputs.CSIIsilonStatusNodeStatus']:\n return pulumi.get(self, \"node_status\")",
"def node_status(self) -> Optional['outputs.CSIPowerStoreStatusNodeStatus']:\n return pulumi.get(self, \"node_status\")",
"def getNDV(self):\n return len(self.globalDVList)",
"def _do_get_status(self):\n logging.info(__name__ + ' : Get status of the device.')\n result = self._execute('X')\n usage = {\n 0: \"Channel not in use\",\n 1: \"Channel used for Nitrogen level\",\n 2: \"Channel used for Helium Level (Normal pulsed operation)\",\n 3: \"Channel used for Helium Level (Continuous measurement)\",\n 9: \"Error on channel (Usually means probe unplugged)\"\n }\n # current_flowing = {\n # 0 : \"Curent not flowing in Helium Probe Wire\",\n # 1 : \"Curent not flowing in Helium Probe Wire\"\n # }\n # auto_fill_status = {\n # 00 : \"End Fill (Level > FULL)\",\n # 01 : \"Not Filling (Level < FULL, Level > FILL)\",\n # 10 : \"Filling (Level < FULL, Level > FILL)\",\n # 11 : \"Start Filling (Level < FILL)\"\n # }\n return usage.get(int(result[1]), \"Unknown\")",
"def robotiq_get_status(self, number_of_registers=3):\r\n return self._arm.robotiq_get_status(number_of_registers=number_of_registers)",
"def nwmetricmepstatus(self) :\n\t\ttry :\n\t\t\treturn self._nwmetricmepstatus\n\t\texcept Exception as e:\n\t\t\traise e",
"def getnumbarvar(self): # 3\n res,resargs = self.__obj.getnumbarvar()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numbarvar_return_value = resargs\n return _numbarvar_return_value",
"def get_status(self):\n return self.read_register(259, 0, 3)",
"def getVirtualStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/qemu/%s/status/current' % (node,vmid),None)\n return data",
"def getnumvar(self):\n numvar_ = ctypes.c_int32()\n res = __library__.MSK_XX_getnumvar(self.__nativep,ctypes.byref(numvar_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numvar_ = numvar_.value\n _numvar_return_value = numvar_\n return (_numvar_return_value)",
"def node_status(self) -> Optional['outputs.CSIUnityStatusNodeStatus']:\n return pulumi.get(self, \"node_status\")",
"def _nvidia_smi():\n\n status = check_output(['nvidia-smi', \n '--query-gpu=utilization.gpu,utilization.memory', \n '--format=csv'])\n status = pd.read_csv(StringIO(status.decode('utf-8')))\n \n # Reformat column names.\n # (Need the col.strip() because sometimes there are preceding spaces)\n map_cols = {'utilization.gpu [%]': 'Utilization (%)',\n 'utilization.memory [%]': 'Memory (%)'}\n status.columns = [map_cols[col.strip()] for col in status.columns]\n\n # Convert to numerical data\n for col in status.columns:\n status[col] = status[col].apply(lambda x: int(x.rstrip('%')))\n\n return status",
"def get_state(self):\n\t\treturn call_sdk_function('PrlVmInfo_GetState', self.handle)",
"def queryStatus (self) :\n\n return self.sendCommand(\"CMD_IN_QUERY_STATUS\", \"\")",
"def getnumbarcnz(self):\n nz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getnumbarcnz(self.__nativep,ctypes.byref(nz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nz_ = nz_.value\n _nz_return_value = nz_\n return (_nz_return_value)",
"def getnumbaranz(self):\n nz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getnumbaranz(self.__nativep,ctypes.byref(nz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nz_ = nz_.value\n _nz_return_value = nz_\n return (_nz_return_value)",
"def read_connected_emu_snr(self):\n snr = ctypes.c_uint32()\n\n result = self._lib.NRFJPROG_read_connected_emu_snr(ctypes.byref(snr))\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)\n\n return snr.value",
"async def get_status(self) -> str:\n return await self.hw_device.status()",
"def calc_nmi(ground_truth, communities_detected):\n calculated_nmi = nmi(ground_truth, communities_detected)\n return calculated_nmi",
"def get_nix(self):\n return self.dim",
"def _get_status(self, numBytes=6):\n numRegs = int(ceil(numBytes/2.0))\n\n # To do!: Implement try/except\n # Get status from the device\n response = self.client.read_holding_registers(0x07D0, numRegs, unit=0x0009)\n\n # Instantiate output as an empty list\n output = []\n\n # Fill the output with the bytes in the appropriate order\n for i in range(0, numRegs):\n output.append((response.getRegister(i) & 0xFF00) >> 8)\n output.append(response.getRegister(i) & 0x00FF)\n\n # Output the result\n return output",
"def status(self) -> pulumi.Output['outputs.VirtualHardDiskStatusResponse']:\n return pulumi.get(self, \"status\")",
"def vnN(self):\n return np.array(\n [x for x in [self.nNx, self.nNy, self.nNz] if x is not None],\n dtype=int\n )",
"def getnumintvar(self):\n numintvar_ = ctypes.c_int32()\n res = __library__.MSK_XX_getnumintvar(self.__nativep,ctypes.byref(numintvar_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numintvar_ = numintvar_.value\n _numintvar_return_value = numintvar_\n return (_numintvar_return_value)",
"def handle_nmi(self):\n print \"NMI HANDLER\"\n self.push_word(self.get_register('PC'))\n self.push_byte(self.get_register('P'))\n self.set_flag('I', 1)\n\n # MMM: somewhere we should check if NMIs are disabled in the status register?\n # jump to the NMI vector\n target = self.read_mem_word(self.nmi_vector)\n self.set_pc(target)\n return True",
"def getN(self):\r\n return self.N"
] | [
"0.5753764",
"0.57007307",
"0.5678485",
"0.56483656",
"0.5635664",
"0.5618904",
"0.559551",
"0.5575454",
"0.5551961",
"0.5545708",
"0.5526919",
"0.55176646",
"0.54776466",
"0.54100347",
"0.54067737",
"0.5321118",
"0.5198335",
"0.51954293",
"0.51463753",
"0.512988",
"0.5129837",
"0.5119609",
"0.5119074",
"0.5078924",
"0.50723445",
"0.5028012",
"0.50212806",
"0.5001112",
"0.49810457",
"0.49771798"
] | 0.81031567 | 0 |
Determine the numbers divisible by k from a list | def get_longest_div_k(lst, k):
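# Keep every element of lst that is evenly divisible by k, preserving input order.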
rezultat = []
for x in lst:
if x % k == 0:
rezultat.append(x)
return rezultat | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getDivisors(n):",
"def divisor(k, num):\n\n if k < 0:\n raise Exception('k must be >= 0: {}'.format(k))\n\n factors = prime_factorization(num)\n result = 1\n if k == 0:\n for prime in factors:\n result *= prime + 1\n\n for prime in factors:\n result *= ((pow(prime, (factors[prime] + 1) * k) - 1) //\n (prime ** k - 1))\n return result",
"def calc(k):\n n = factorial(4*k) * (1103.0 + 26390.0*k)\n d = factorial(k)**4 * 396.0**(4.0*k)\n z = n/d\n return z",
"def C(n,k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0",
"def count_k(n, k):\n if n == 0:\n return 1\n elif n < 0:\n return 0\n else:\n total = 0\n i = 1\n while i <= k:\n total += count_k(n - i, k)\n i += 1\n return total",
"def beautifulSubsets(self, nums: List[int], k: int) -> int:\n\n \"\"\"\n queue = deque([([], -1)])\n res = 0\n\n while queue:\n cur, idx = queue.popleft()\n res += 1\n\n for i in range(idx + 1, len(nums)):\n if nums[i] - k in cur or nums[i] + k in cur:\n continue\n\n queue.append((cur + [nums[i]], i))\n\n return res - 1\n \"\"\"\n\n \"\"\"\n # dp0 is the ways that without A[i]\n # dp1 is the ways that with A[i]\n\n count = [Counter() for i in range(k)]\n for n in nums:\n count[n % k][n] += 1\n\n res = 1\n for i in range(k):\n prev, dp0, dp1 = 0, 1, 0\n for n in sorted(count[i]):\n v = pow(2, count[i][n])\n if prev + k == n:\n dp0, dp1 = dp0 + dp1, dp0 * (v - 1)\n else:\n dp0, dp1 = dp0 + dp1, (dp0 + dp1) * (v - 1)\n\n prev = n\n\n res *= dp0 + dp1\n\n return res - 1\n \"\"\"\n\n # Count the frequency of A, and then consider all the arithmetic sequence with difference k.\n # Each arithmetic sequence can be solve as a hourse robber problem.\n # We solve the hourse robber by dp.\n # dp(a) return the result for sequence no bigger than a.\n\n # dp(a)[0] is the ways that without a\n # dp(a)[1] is the ways that with a\n\n # dp(a)[0] = dp(a - k)[0] + dp(a - k)[1]\n # dp(a)[1] = dp(a - k)[0] * (2 ^ count(a) - 1\n\n count = Counter(nums)\n\n def dp(n):\n dp0, dp1 = dp(n - k) if n - k in count else (1, 0)\n return dp0 + dp1, dp0 * (pow(2, count[n]) - 1)\n\n return functools.reduce(operator.mul, (sum(dp(n)) for n in count if not count[n + k])) - 1",
"def divisior(n: int) -> list:\n j = [n]\n for d in range(n+1): #loop bis n\n d > 0",
"def choose(n, k):\r\n if 0 <= k <= n:\r\n ntok = 1\r\n ktok = 1\r\n for t in range(1, min(k, n - k) + 1):\r\n ntok *= n\r\n ktok *= t\r\n n -= 1\r\n return ntok // ktok\r\n else:\r\n return 0",
"def solution2(nums, K):\n s = 0\n sum_til = []\n for n in nums:\n s += n\n sum_til.append(s)\n\n l = len(nums)\n for i in range(l):\n for j in range(i+1, l):\n sum_ij = sum_til[j] if i == 0 else sum_til[j] - sum_til[i-1]\n if K != 0 and sum_ij % K == 0:\n return True\n if K == 0 and sum_ij == 0:\n return True\n return False",
"def _get_m(self, ks: List[int]) -> int:\n\n base = 1\n for c in ks:\n base = base * c // gcd(base, c)\n return base",
"def count_kmers_possible(read, k):\n num_kmers = {}\n num_kmers1 = len(read) - k + 1\n num_kmers2 = 4**k\n#num_kmers.append(min(num_kmers1,num_kmers2))\n num_kmers = min(num_kmers1,num_kmers2)\n num_kmers3 = max(num_kmers,0)\n return(num_kmers3)",
"def divisor_k_lookup(up_to, k):\n div = defaultdict(lambda: 1)\n div[1] = 1\n\n for i in xrange(2, up_to):\n for j in xrange(i, up_to, i):\n div[j] += i**k\n\n return div",
"def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:\n\n if not nums:\n return 0\n\n if k <= 1:\n return 0\n\n count = 0\n lo = 0\n product = 1\n for hi in range(len(nums)):\n product *= nums[hi]\n while product >= k:\n product /= nums[lo]\n lo += 1\n count += hi - lo + 1\n return count",
"def chosse(n,k):\n import math \n if (n>=k and k>=0):\n return math.factorial(n) / (math.factorial(k) * math.factorial(n-k))\n else:\n return \"No se puede calcular el numero factorial indicado\"",
"def choose(n, k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0",
"def choose(n, k):\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0",
"def main(l, k):\n S = 0\n T = product(xrange(2), repeat=k)\n for ts in T:\n tmp = []\n\n for t, c in zip(ts, cs):\n tmp.append(((-1)*c)**t)\n\n S += (sum(tmp)**l)\n val = (sum(tmp)**l)\n print val\n return S / float(2**(k))",
"def main(l, k):\n S = 0\n T = product(xrange(2), repeat=k)\n for ts in T:\n tmp = []\n\n for t, c in zip(ts, cs):\n tmp.append(((-1)*c)**t)\n\n S += (sum(tmp)**l)\n val = (sum(tmp)**l)\n print val\n return S / float(2**(k))",
"def ndcg_at_k(self, r, k, method=0):\n # print(\"sorted:\" + str(sorted(r, reverse=True)))\n # 排完序最理想的結果分數\n dcg_max = self.dcg_at_k(sorted(r, reverse=True), k, method)\n # print(\"dcg_max:\" + str(dcg_max))\n if not dcg_max:\n return 0.\n return self.dcg_at_k(r, k, method) / dcg_max",
"def diviseur(n):\n s = 0\n for i in range (1, n):\n if n%i == 0:\n s += 1\n print(i)\n return \"Le nombre de diviseurs est\", s",
"def cdf(self, k):\n\n if k < 0 or k > self.n:\n return 0\n\n k = int(k)\n ans = 0\n for i in range(0, k + 1):\n ans += self.pmf(i)\n return ans",
"def workersNeeded(k, m):\n # formula: k/m\n from math import ceil\n return ceil(float(k)/float(m))",
"def choose(n, k):\n # http://stackoverflow.com/a/3025547/313967\n if 0 <= k <= n:\n ntok = 1\n ktok = 1\n for t in xrange(1, min(k, n - k) + 1):\n ntok *= n\n ktok *= t\n n -= 1\n return ntok // ktok\n else:\n return 0",
"def answer(l):\n num_divisors = [0] * len(l)\n triple_count = 0\n for large in range(1, len(l)):\n for small in range (0, large):\n if l[large] % l[small] == 0:\n num_divisors[large] += 1\n triple_count += num_divisors[small]\n return triple_count",
"def get_k(self, n, m):\n k = m/n * log(2)\n return int(k)",
"def partition(n, ks):\n if type(ks) not in (list, tuple):\n raise TypeError('ks must be an iterable')\n if not ks:\n raise ValueError('ks must have at least one value')\n elif min(ks) < 0:\n raise ValueError('group size k must be non-negative')\n num = _math.factorial(n)\n den = 1\n for k in ks:\n den *= _math.factorial(k)\n return int(num / den)",
"def nCkarray(*k_values):\n result = 1\n for i, j in enumerate((m for k in k_values for m in range(1, k+1)), 1):\n result = (result * i) // j\n return result",
"def find_subarrays(nums, k):\n res = pre_sum = 0\n dic = {0: 1}\n for i in nums:\n pre_sum += i\n res += dic.get(pre_sum - k, 0)\n dic[pre_sum] = dic.get(pre_sum, 0) + 1\n return res",
"def divide(self, k, endless=False):\n ### ERROR: Insufficient volume. ###\n v = self.volume / k\n #vols = [v for i in xrange(k)]\n #vols[-1] = self.volume - sum(vols[:-1])\n if self.endless:\n return [self.aliquot(v, endless) for i in xrange(k)]\n else:\n samples = [self.aliquot(v, endless) for i in xrange(k-1)]\n samples.append(self.aliquot(self.volume, endless))\n return samples",
"def all_kmers(k):\n for i in range(0, 4 ** k):\n res = number_to_kmer(i, k)\n yield res"
] | [
"0.68305314",
"0.66830385",
"0.64528286",
"0.6440956",
"0.64300156",
"0.6414138",
"0.640048",
"0.63713896",
"0.6357683",
"0.63318574",
"0.63268465",
"0.6303533",
"0.62746555",
"0.6266593",
"0.62309676",
"0.62309676",
"0.61922175",
"0.61922175",
"0.61577624",
"0.61330014",
"0.6132927",
"0.61119103",
"0.60999954",
"0.60972303",
"0.6066882",
"0.60249233",
"0.6024923",
"0.6023364",
"0.6001323",
"0.59818363"
] | 0.70152783 | 0 |
Linac phasing. Note that these overlays override individual klystron phases. | def bmad_linac_phasing_lines(epics):
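# Each entry is a Bmad overlay assignment; the L2/L3 phase deltas are read live from EPICS PVs via caget.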
lines = [
'! Linac overall phasing',
'O_L1[phase_deg] = 0 ! K21_1 sets this directly. This is a delta on top of that.',
'O_L2[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:CALC204')),
'O_L3[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:AO499'))
]
return lines | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_display_from_lines(self):\n y = 1\n maxlin = CA_World.ca_display_size - 1\n limy = len(self.ca_lines) + maxlin\n for i in self.ca_lines:\n x = 1\n if limy >= maxlin:\n if SimEngine.gui_get('init') == \"Right\": # Right\n limx = len(i) + maxlin + 2\n for j in range(len(i) - 2):\n if limx >= maxlin:\n b = bool(i[j])\n self.pixel_tuple_to_patch(\n ((maxlin - len(i) + 2 + x) * 4, (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n x += 1\n else:\n limx -= 1\n elif SimEngine.gui_get('init') == \"Left\": # Left\n limx = 0\n for j in range(len(i) - 2):\n if limx <= maxlin + 2:\n b = bool(i[j])\n self.pixel_tuple_to_patch(((x - 3) * 4, (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(\n b)\n x += 1\n limx += 1\n else: # Center and Random\n limx = int((len(i) - maxlin) / 2)\n k = 0\n for j in range(len(i)):\n if limx < 0:\n b = bool(i[j])\n self.pixel_tuple_to_patch(((maxlin - len(i) + x - 1 + limx) * 4,\n (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n else:\n if k < maxlin + 1:\n b = bool(i[j + limx])\n self.pixel_tuple_to_patch((k * 4,\n (maxlin - len(self.ca_lines) + y) * 4)).set_on_off(b)\n x += 1\n k += 1\n y += 1\n else:\n limy -= 1",
"def add_lvs_correspondence_points(self):\n\n pin = self.rbl_inv_inst.get_pin(\"A\")\n self.add_label_pin(text=\"bl[0]\",\n layer=pin.layer,\n offset=pin.ll(),\n height=pin.height(),\n width=pin.width())\n\n pin = self.dc_inst.get_pin(\"out\")\n self.add_label_pin(text=\"delayed_en\",\n layer=pin.layer,\n offset=pin.ll(),\n height=pin.height(),\n width=pin.width())",
"def drawWarpLines(self):\n # draw warp lines\n for item in self.game.warpLines:\n anwp.sl.engine.drawLine(item[0]+self.bufferX, item[1]+self.bufferY, item[2]+self.bufferX, item[3]+self.bufferY, pyui.colors.blue)",
"def road_lines():\n cv2.polylines(frame_1, [pts_1], True, yellow_color)\n cv2.polylines(frame_2, [pts_2], True, yellow_color)",
"def _on_lane_invasion(self, event):\n self.lanes_invaded = event.crossed_lane_markings",
"def draw_lines(img, lines, color=[0, 0, 255], thickness=10):\n \n yFinal = 540 # tweak these values as per the frame size\n yIni = 350\n xPlus = []\n yPlus = []\n xMinus = []\n yMinus= []\n slope_range = 0.2\n\n if lines is not None:\n for line in lines:\n if line is not None:\n for x1,y1,x2,y2 in line:\n # check slope \n slope = (y2-y1)/(x2-x1)\n\t\t \n \t\t # Collect all points with + ve slope (right lane)\n if (slope > slope_range):\n xPlus.append(x1)\n xPlus.append(x2)\n yPlus.append(y1)\n yPlus.append(y2)\n\n # Collect all points with - ve slope (left lane)\n elif ((slope) < (-slope_range)):\n xMinus.append(x1)\n xMinus.append(x2)\n yMinus.append(y1)\n yMinus.append(y2)\n # If out of range, lists defined in beginning of this function will be empty \n else:\n continue\n \n # draw right lane\n x1,y1,x2,y2 = fit_line(xPlus, yPlus, yIni, yFinal)\n cv2.line(img,(x1,y1),(x2,y2),color, thickness) \n\n # draw left lane\n x1,y1,x2,y2 = fit_line(xMinus, yMinus, yIni, yFinal)\n cv2.line(img,(x1,y1),(x2,y2),color,thickness)",
"def set_lanes(left_lines, right_lines, image):\n \n Y_LANE_EXTRAP = 35 # percent up from bottom of image to extrapolate lane lines\n \n image_wk = np.copy(image) # working copy\n image_lines = np.copy(image_wk)*0 # create a blank to draw lines on\n im_y = image_wk.shape[0]\n \n y1_lane = im_y\n y2_lane = np.int32(im_y - (Y_LANE_EXTRAP/100*im_y))\n \n # Process left lane\n if left_lines:\n z_left = my_linear_polyfit(left_lines)\n x1_lane = np.int32( (y1_lane - z_left[1]) / z_left[0] ) # x = (y-b)/m\n x2_lane = np.int32( (y2_lane - z_left[1]) / z_left[0] )\n \n # Draw left lane on blank image\n cv2.line(image_lines, (x1_lane, y1_lane), (x2_lane, y2_lane), (100,100,100), 15)\n \n # Process right lane\n if right_lines:\n z_right = my_linear_polyfit(right_lines)\n x1_lane = np.int32( (y1_lane - z_right[1]) / z_right[0] ) # x = (y-b)/m\n x2_lane = np.int32( (y2_lane - z_right[1]) / z_right[0] )\n \n # Draw right lane on blank image\n cv2.line(image_lines, (x1_lane, y1_lane), (x2_lane, y2_lane), (100,100,100), 15)\n \n # Overlay detected left/right lanes on road image\n image_wk = weighted_img(image_lines, image_wk)\n \n # Output road image with overlaid left/right lanes\n return image_wk",
"def palm_land(self):\n self.palm_landing = True\n self.drone.palm_land()",
"def lane(self, mask, win_color = None):\n\n # the nonzero point\n solid = np.nonzero(mask)\n sx, sy = solid[1], solid[0]\n\n # make a image to draw on\n out_img = np.dstack([np.zeros_like(mask)]*3)*255\n if self.fit is None:\n # get the intial poly line for window sliding\n\n # get the midpoint for both line, expecting it shows up in the lower half\n self.h, self.w = mask.shape\n self.midpoint = self.w//2\n self.win_height = self.h//self.nb_win\n\n curv_head = self.h//self.frac\n histogram = np.sum(mask[:curv_head, :], axis = 0)\n mid_l = np.argmax(histogram[:self.midpoint])\n mid_r = np.argmax(histogram[self.midpoint:]) + self.midpoint\n\n # the indice for solid pixel in left and right\n l_lane_idc = []\n r_lane_idc = []\n\n # slide the windows down up\n btm = self.h\n for n in range(self.nb_win):\n # right window\n ul_l = (mid_l - self.half, btm - self.win_height)\n lr_l = (mid_l + self.half, btm)\n\n # left window\n ul_r = (mid_r - self.half, btm - self.win_height)\n lr_r = (mid_r + self.half, btm)\n\n\n # draw the retangle on the image\n if win_color:\n cv2.rectangle(out_img, lr_l, ul_l, win_color, 2)\n cv2.rectangle(out_img, lr_r, ul_r, win_color, 2)\n\n\n # the indice within window\n within_l = ((sx>=ul_l[0]) & \\\n (sx<=lr_l[0]) & \\\n (sy>=ul_l[1]) & \\\n (sy<=lr_l[1])).nonzero()[0]\n\n within_r = ((sx>=ul_r[0]) & \\\n (sx<=lr_r[0]) & \\\n (sy>=ul_r[1]) & \\\n (sy<=lr_r[1])).nonzero()[0]\n\n # append to the lane\n l_lane_idc.append(within_l)\n r_lane_idc.append(within_r)\n\n if len(within_r) > self.minpix:\n mid_r = np.int(np.mean(sx[within_r]))\n if len(within_l) > self.minpix:\n mid_l = np.int(np.mean(sx[within_l]))\n btm -= self.win_height\n\n # concatenate the windows\n l_lane_idc = np.concatenate(l_lane_idc)\n r_lane_idc = np.concatenate(r_lane_idc)\n try:\n self.fit = [np.polyfit(sy[l_lane_idc], sx[l_lane_idc], 2),\n np.polyfit(sy[r_lane_idc], sx[r_lane_idc], 2)]\n except:\n return out_img\n\n\n else:\n # if we've fitted the lane, use that as guide\n l_fit, r_fit = self.fit\n l_lane_idc = ((sx >= np.polyval(l_fit, sy) - self.half) &\n (sx <= np.polyval(l_fit, sy) + self.half)).nonzero()[0]\n r_lane_idc = ((sx >= np.polyval(r_fit, sy) - self.half) &\n (sx <= np.polyval(r_fit, sy) + self.half)).nonzero()[0]\n\n\n curv_head = self.h//self.frac\n l_curv_count = np.sum((sy >= curv_head) & (sx <= self.midpoint))\n r_curv_count = np.sum((sy >= curv_head) & (sx >= self.midpoint))\n\n if l_curv_count >= self.curv_count:\n try: self.fit[0] = np.polyfit(sy[l_lane_idc], sx[l_lane_idc], 2)\n except: pass\n if r_curv_count >= self.curv_count:\n try: self.fit[1] = np.polyfit(sy[r_lane_idc], sx[r_lane_idc], 2)\n except: pass\n\n # draw the lane area\n l_fit, r_fit = self.fit\n y_cord = np.linspace(0, self.h - 1, self.h)\n lane_l = np.polyval(l_fit, y_cord)\n lane_r = np.polyval(r_fit, y_cord)\n\n\n if not win_color:\n pts_l = np.array([np.vstack([lane_l, y_cord]).T])\n pts_r = np.array([np.flipud(np.vstack([lane_r, y_cord]).T)])\n\n pts = np.hstack((pts_l, pts_r))\n cv2.fillPoly(out_img, np.int_(pts), [0, 100, 0])\n\n # draw red on left\n out_img[sy[l_lane_idc], sx[l_lane_idc]] = RED\n # draw blue on right\n out_img[sy[r_lane_idc], sx[r_lane_idc]] = BLUE\n\n\n # put text showing meters away center and radius\n l_btm = np.polyval(l_fit, self.h)\n r_btm = np.polyval(r_fit, self.h)\n mpp = self.lane_width/(r_btm - l_btm) # meters per pixel\n\n mid_lane = int((r_btm + l_btm)/2)\n dev = (self.midpoint - mid_lane)\n radius = np.mean(self.curvature(mpp))\n\n side = ''\n side = 'L' if dev < 0 else 
'R'\n dev_text = (\"%.2fm %s\"%(np.abs(mpp*dev), side))\n radius_text = (\"RADIUS %.2fm\"%(radius)) if radius < 2000 else 'STRAIGHT'\n\n (dev_w, dev_h), _ = cv2.getTextSize(dev_text,\n fontFace = cv2.FONT_HERSHEY_SIMPLEX,\n fontScale = 1, thickness = 2)\n\n (radius_w, radius_h), _ = cv2.getTextSize(radius_text,\n fontFace = cv2.FONT_HERSHEY_SIMPLEX,\n fontScale = 1, thickness = 3)\n\n\n dev_org = (int(mid_lane + 2*dev - dev_w//2), self.h - 30)\n radius_org = (int(mid_lane - radius_w//2), self.h - 80)\n\n\n\n cv2.line(out_img, (mid_lane, self.h - 20),\n (mid_lane, self.h - 40 - dev_h),\n color = [255,255,255], thickness = 3)\n\n cv2.putText(out_img, radius_text,\n fontFace = cv2.FONT_HERSHEY_SIMPLEX,\n fontScale = 1, thickness = 3,\n org = radius_org, color = [0, 0, 0])\n\n cv2.putText(out_img, dev_text,\n fontFace = cv2.FONT_HERSHEY_SIMPLEX,\n fontScale = 1, thickness = 2,\n org = dev_org, color = [0, 0, 0])\n\n return out_img",
"def palm_land(self):\n log.debug(\"PALM_LAND\")\n self.drone.palm_land()",
"def process_laneOffset(self):\n center_line = np.poly1d(np.mean([self.line_l.get_LinePoly().coeffs, self.line_r.get_LinePoly().coeffs], axis=0))\n # store the center line polynomial\n self.center_poly = center_line\n center_point = IMAGE_WIDTH/2 - center_line(709)\n offset_from_center =center_point* self.line_l.x_pxm\n self.lane_offset = offset_from_center\n return center_point",
"def line_layer(self):\n screen_origin = self.ids.mapview.get_window_xy_from(lat1, lon1, self.ids.mapview.zoom)\n screen_destination = self.ids.mapview.get_window_xy_from(lat2, lon2, self.ids.mapview.zoom)\n point_list = [screen_origin[0], screen_origin[1], screen_destination[0], screen_destination[1]]\n\n with self.ids.line.canvas:\n self.ids.line.canvas.clear()\n\n Color(0, 0, 0, .6)\n Line(points=point_list, width=3, joint=\"bevel\")",
"def ActiveHlt2Lines(self) :\n\n lines = [\n 'Hlt2SingleMuon',\n 'Hlt2SingleMuonHighPT',\n 'Hlt2SingleMuonLowPT',\n ]\n \n return lines",
"def draw_lh_lines(data):\n #hnd = extract_left_hand(data);\n hnd = np.array(data['crop']);\n hand.draw_hand_lines(hnd,data['lhkpss'][data['i']]);\n return hnd;",
"def add_layout_pins(self):\n en_offset = self.dc_inst.get_pin(\"in\").ll()\n self.add_layout_pin(text=\"en\",\n layer=\"metal1\",\n offset=en_offset.scale(1,0),\n width=self.m1_width,\n height=en_offset.y)\n\n out_offset = self.rbl_inv_inst.get_pin(\"Z\").ll()\n self.add_layout_pin(text=\"out\",\n layer=\"metal1\",\n offset=out_offset.scale(1,0),\n width=self.m1_width,\n height=out_offset.y)",
"def lla(self, input_poly):\n # check the input\n if type(input_poly) is not Polygon:\n #if not isinstance(input_poly, Polygon):\n # if we weren't given a polygon, turn the coordinates into one\n if (type(input_poly) is np.ndarray) or (type(input_poly) is list):\n input_poly = Polygon(input_poly)\n else:\n return\n # set the internal value for lla shape\n self._lla_shape = input_poly\n # get the vertex coordinates for the lla shape\n lla_coords_temp = np.array(self._lla_shape.exterior.xy).T\n # add a column of zeros for altitude (shapely is 2D..)\n lla_coords = np.zeros((lla_coords_temp.shape[0], 3))\n lla_coords[:,:-1] = lla_coords_temp\n # convert lla vertices to ned\n ned_coords = lla2ned(lla_coords, self._ref_pt)\n # make the ned shape out of these coordinates\n ned_exterior = Polygon(ned_coords)\n\n # make a unified shape for the keep out zones\n keep_out_list = []\n for shape in input_poly.interiors:\n # convert keepout coords to ned\n shape = Polygon(shape)\n lla_coords_temp = np.array(shape.exterior.xy).T\n # add a column of zeros for altitude (shapely is 2D..)\n lla_coords = np.zeros((lla_coords_temp.shape[0], 3))\n lla_coords[:,:-1] = lla_coords_temp\n # convert lla vertices to ned\n ned_coords = lla2ned(lla_coords, self._ref_pt)\n # add this region to the list\n keep_out_list.append(Polygon(ned_coords) )\n keep_out = cascaded_union(keep_out_list)\n\n # now make a valid mission area polygon\n self._ned_shape = ned_exterior.difference(keep_out)",
"def mark_lane_lines(undist, warped, ploty, left_fitx, right_fitx, Minv):\n # Create an image to draw the lines on\n warp_zero = np.zeros_like(warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))\n\n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, Minv, (warped.shape[1], warped.shape[0])) \n # Combine the result with the original image\n result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\n return result",
"def overlay_lines(self, p1, p2, FT, frame):\n \n if p1 == p2:\n self.show_dif_class_msg()\n \n else:\n a1 = complete_scores[p1, p2][0]\n a2 = complete_scores[p1, p2][1]\n projection1 = make_1D(extract_2D[p1], a1)\n projection2 = make_1D(extract_2D[p2], a2)\n\n if FT: \n pad_p1 = np.pad(projection1.vector, pad_width=(0, shape-projection1.size()))\n pad_p2 = np.pad(projection2.vector, pad_width=(0, shape-projection2.size()))\n A = abs(np.fft.rfft(pad_p1))\n B = abs(np.fft.rfft(pad_p2))\n \n f = Figure(figsize=(8,4))\n ax = f.add_subplot(111)\n\n ax.bar(range(len(A)), A, alpha=0.35, color='deepskyblue', ec='k', linewidth=1)\n ax.bar(range(len(B)), B, alpha=0.35, color='yellow', ec='k', linewidth=1)\n \n ax.get_xaxis().set_ticks([])\n ax.set_xlabel('frequency component')\n ax.set_ylabel('Amplitude')\n\n else:\n a2_flip = complete_scores[p1, p2][1] + 180\n projection2_flip = make_1D(extract_2D[p2], a2_flip)\n\n score_default, r, c = slide_score(projection1, projection2) # Score and location of optimum\n score_flip, r_flip, c_flip = slide_score(projection1, projection2_flip) # Score of phase flipped\n\n if score_default <= score_flip:\n ref_intensity, comp_intensity = r, c\n else:\n ref_intensity, comp_intensity = r_flip, c_flip\n\n f = Figure(figsize=(8,4))\n ax = f.add_subplot(111)\n\n x_axis_max = len(ref_intensity)\n y_axis_max = max(np.amax(ref_intensity), np.amax(comp_intensity))\n y_axis_min = min(np.amin(ref_intensity), np.amin(comp_intensity))\n\n ax.plot(ref_intensity, color='black')\n ax.plot(comp_intensity, color='black')\n\n ax.fill_between(range(len(ref_intensity)), ref_intensity, alpha=0.35, color='deepskyblue')\n ax.fill_between(range(len(comp_intensity)), comp_intensity, alpha=0.35, color='yellow')\n\n ax.set_ylabel('Intensity')\n ax.set_ylim([y_axis_min, (y_axis_max + 0.025*y_axis_max)])\n ax.xaxis.set_visible(False)\n\n f.tight_layout()\n\n if self.projcanvas:\n self.projcanvas.get_tk_widget().destroy()\n self.projtoolbar.destroy()\n\n self.projcanvas = FigureCanvasTkAgg(f, frame)\n self.projcanvas.draw()\n self.projcanvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n self.projcanvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n self.projtoolbar = NavigationToolbar2Tk(self.projcanvas, frame)\n self.projtoolbar.update()",
"def vertival_lines_iglu():\n # top first line\n line(screen, BLACK, (180, 400), (170, 440))\n line(screen, BLACK, (220, 400), (230, 440))\n # second line\n line(screen, BLACK, (150, 438), (140, 480))\n line(screen, BLACK, (200, 438), (200, 480))\n line(screen, BLACK, (250, 438), (260, 480))\n # third line\n line(screen, BLACK, (115, 477), (95, 525))\n line(screen, BLACK, (170, 480), (165, 528))\n line(screen, BLACK, (235, 480), (240, 528))\n line(screen, BLACK, (285, 480), (300, 528))\n # forth line\n line(screen, BLACK, (70, 525), (60, 570))\n line(screen, BLACK, (125, 530), (115, 580))\n line(screen, BLACK, (200, 530), (200, 580))\n line(screen, BLACK, (270, 530), (275, 580))\n line(screen, BLACK, (330, 525), (340, 570))",
"def draw_horizontal_paddle(self):\n pygame.draw.rect(self.screen, self.color, self.top_rect)\n pygame.draw.rect(self.screen, self.color, self.bot_rect)",
"def draw_final_image(self, image, warped, undist, ploty, left_fitx, right_fitx, Minv, left_rad, right_rad):\n gray = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)\n # Create an image to draw the lines on\n warp_zero = np.zeros_like(gray).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n\n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))\n # Combine the result with the original image\n result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\n\n font = cv2.FONT_HERSHEY_SIMPLEX\n off_center = calculate_center(left_fitx, right_fitx, image.shape)\n direction_str = 'left' if off_center < 0 else 'right'\n center_str = '{:.2f} m of center {}'.format(abs(off_center), direction_str)\n cv2.putText(result, center_str, (430, 630), font, 1, (0, 0, 255), 2, cv2.LINE_AA)\n if left_rad and right_rad:\n curvature = 0.5 * (round(right_rad / 1000, 1) + round(left_rad / 1000, 1))\n else:\n curvature = 0\n str2 = 'Radius of curvature: {} km'.format(curvature)\n cv2.putText(result, str2, (430, 670), font, 1, (0, 0, 255), 2, cv2.LINE_AA)\n\n if self.args.is_test:\n plt.imshow(result)\n plt.show()\n\n return result",
"def ActiveHlt1Lines(self) :\n lines = ['Hlt1IncPhi','Hlt1CalibTracking']\n\n return lines",
"def ActiveHlt2Lines(self) :\n hlt2 = ['Hlt2PassThrough','Hlt2Lumi','Hlt2DebugEvent','Hlt2Forward','Hlt2ErrorEvent','Hlt2Transparent',\n 'Hlt2diPhotonDiMuon',\n 'Hlt2LowMultMuon',\n 'Hlt2LowMultHadron',\n 'Hlt2LowMultPhoton',\n 'Hlt2LowMultElectron',\n 'Hlt2LowMultHadron_nofilter',\n 'Hlt2LowMultElectron_nofilter',\n 'Hlt2HighPtJets'\n ]\n\n\n from Muons_April2012 import Muons_April2012\n hlt2.extend( Muons_April2012().ActiveHlt2Lines() )\n\n from Electrons_July2011 import Electrons_July2011\n hlt2.extend( Electrons_July2011().ActiveHlt2Lines() )\n\n from Hadrons_September2012 import Hadrons_September2012\n hlt2.extend( Hadrons_September2012().ActiveHlt2Lines() )\n \n from DV_draft2012 import DV_draft2012 \n hlt2.extend( DV_draft2012().ActiveHlt2Lines() )\n\n from CharmLeptonic_draft2012 import CharmLeptonic_draft2012\n hlt2.extend( CharmLeptonic_draft2012().ActiveHlt2Lines() )\n\n from CharmCEP_September2012 import CharmCEP_September2012\n hlt2.extend( CharmCEP_September2012().ActiveHlt2Lines() )\n\n from KshortMuMuPiPi_July2012 import KshortMuMuPiPi_July2012\n hlt2.extend( KshortMuMuPiPi_July2012().ActiveHlt2Lines() )\n \n return hlt2",
"def lane_fill_poly(self, binary_warped,undist, inverse_perspective_transform, left_fit,right_fit):\r\n # Generate x and y values\r\n ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )\r\n left_fitx = self.get_val(ploty, left_fit)\r\n right_fitx = self.get_val(ploty, right_fit)\r\n \r\n # Create an image to draw the lines on\r\n warp_zero = np.zeros_like(binary_warped).astype(np.uint8)\r\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\r\n\r\n # Recast x and y for cv2.fillPoly()\r\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\r\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\r\n pts = np.hstack((pts_left, pts_right))\r\n\r\n # Draw the lane \r\n cv2.fillPoly(color_warp, np.int_([pts]), (255,255, 255))\r\n\r\n # Warp using inverse perspective transform\r\n newwarp = cv2.warpPerspective(color_warp, inverse_perspective_transform, (binary_warped.shape[1], binary_warped.shape[0])) \r\n # overlay\r\n #newwarp = cv.cvtColor(newwarp, cv.COLOR_BGR2RGB)\r\n result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)\r\n \r\n return result",
"def __add_to_piano_roll(self, line_element: LineElement) -> None:\n self._piano_roll[\n line_element.scale_element.position_in_semitones,\n line_element.start_time_in_eighths:line_element.end_time_in_eighths\n ] = 1",
"def on_draw_overlay(self):",
"def _inverse_lines(self):\n pass",
"def PINTARLEYENDAPLANOXY(self):\n \n # Pinto la linea del eje X\n self.telaMAPA.create_line(40, 560, 680, 560)\n\n # Pinto los numeros del eje x\n for i in range(0, 26):\n # Lugar de referencia a pintar que se mueve en x\n x0 = ((i+1)*24) + 30\n self.telaMAPA.create_text(x0, 580, text=str(i))\n\n \n # Pinto la linea del eje y\n self.telaMAPA.create_line(40, 20, 40, 560)\n\n # Pinto los numeros del eje y\n for i in range(0, 26):\n # Lugar de referencia a pintar\n y0 = ((i+1)*21) + 6\n self.telaMAPA.create_text(20, y0, text=str(25 - i))\n\n \n # Vamos a pintar la botonera\n self.PINTARMATRIXDEBOTONES()\n\n # Vamos a rellenar la matrix que controla el pintado de las paredes\n self.rellenarMatrix()",
"def draw_flow(img, pts, next_pts, flowColor = (0,0,255), flowThickness = 1, p=1, q=1, th = 0, drawArrows=False,\n lenghtOfArrayArm = 2, angleOfArrow=np.pi/3):\n if pts.shape[0] == 0 or next_pts.shape[0] == 0 or pts.shape[0] != next_pts.shape[0]:\n return img\n lines = np.hstack((pts, next_pts))\n #make it into format opencv wants\n lines = lines.reshape(-1,2,2)\n #round up to nears integer\n lines = np.int32(lines + 0.5)\n\n #select p every q\n index = np.arange(lines.shape[0])\n index = index[(index%q) < p]\n lines = lines[index]\n\n #filter small values\n if th > 0:\n #make points into a easy way to manipulate\n points = lines.reshape(-1, 4)\n #compute displacement\n displacement = points[:,2:4] - points[:,0:2]\n S = np.linalg.norm(displacement, axis=1)\n lines = lines[S > th]\n \n if len(img.shape) < 3:\n #make sure we're dealing with a BGR image\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n #draw multiple lines\n cv2.polylines(img, lines, isClosed = False, color = flowColor, thickness=flowThickness)\n\n if drawArrows:\n #compute flow direction\n flow = lines[:, 1, :] - lines[:,0,:]\n flow_angle = np.arctan2(flow[:,1], flow[:,0]).reshape(-1,1)\n\n #get start point of every arrow\n startPoints_x = lines[:, 1, 0].reshape(-1,1)\n startPoints_y = lines[:, 1, 1].reshape(-1,1)\n\n #get end point of arrow arm 1\n endPoints_x = (startPoints_x + lenghtOfArrayArm * np.cos( angleOfArrow + np.pi + flow_angle)).reshape(-1,1)\n endPoints_y = (startPoints_y + lenghtOfArrayArm * np.sin( angleOfArrow + np.pi + flow_angle)).reshape(-1,1)\n\n #get end point of arrow arm 2\n endPoints2_x = (startPoints_x + lenghtOfArrayArm * np.cos( -1.0*angleOfArrow + np.pi + flow_angle)).reshape(-1,1)\n endPoints2_y = (startPoints_y + lenghtOfArrayArm * np.sin( -1.0*angleOfArrow + np.pi + flow_angle)).reshape(-1,1)\n\n\n #create array with line indications the way opencv wants it\n arrowArms = np.hstack((startPoints_x, startPoints_y, endPoints_x, endPoints_y))\n arrowArms2 = np.hstack((startPoints_x, startPoints_y, endPoints2_x, endPoints2_y))\n arrowArms = np.vstack((arrowArms, arrowArms2))\n arrowArms = arrowArms.reshape((-1,2,2))\n arrowArms = np.array(arrowArms, dtype = np.int32)\n\n\n #draw multiple lines\n cv2.polylines(img, arrowArms, isClosed = False, color = flowColor, thickness=flowThickness)\n\n\n return img",
"def line_plane(l, p):\n d = dot((p.o - l.o), p.n) / dot(l.d, p.n)\n return l(d)"
] | [
"0.59270585",
"0.58033264",
"0.5608836",
"0.55544627",
"0.5367089",
"0.53572994",
"0.53372705",
"0.5273966",
"0.52700067",
"0.5266239",
"0.52600336",
"0.5244795",
"0.51854324",
"0.5150837",
"0.5133585",
"0.51236475",
"0.51020104",
"0.50978017",
"0.5095993",
"0.5059783",
"0.5050122",
"0.50479436",
"0.50455284",
"0.50362366",
"0.50353545",
"0.50268257",
"0.50054336",
"0.500227",
"0.4981197",
"0.49799535"
] | 0.6246182 | 0 |
Writes linac phasing lines to a Bmad file. Requires epics (or proxy object). | def write_bmad_linac_phasing_lines(filePath='linac_settings.bmad', epics=None, verbose=False):
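# Regenerate the overlay lines and write them out one per line; filePath is overwritten on each call.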
lines = bmad_linac_phasing_lines(epics)
with open(filePath, 'w') as f:
for l in lines:
f.write(l+'\n')
if verbose:
print('Written:', filePath) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_tao_BC_and_LEM_lines(filePath='LEM_settings.tao', epics=None, verbose=False):\n lines = tao_BC_and_LEM_lines(epics)\n with open(filePath, 'w') as f:\n for l in lines:\n f.write(l+'\\n')\n if verbose:\n print('Written:', filePath)\n\n \n \n return lines",
"def bmad_linac_phasing_lines(epics):\n lines = [\n '! Linac overall phasing',\n 'O_L1[phase_deg] = 0 ! K21_1 sets this directly. This is a delta on top of that.', \n 'O_L2[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:CALC204')),\n 'O_L3[phase_deg] = '+str(epics.caget('SIOC:SYS0:ML00:AO499'))\n ]\n return lines",
"def tao_BC_and_LEM_lines(epics):\n bc1_e0=epics.caget('SIOC:SYS0:ML00:AO483')*1e6\n bc2_e0=epics.caget('SIOC:SYS0:ML00:AO489')*1e9\n l3_e0 =epics.caget('SIOC:SYS0:ML00:AO500')*1e9\n \n # Charge in LTU\n q_after_horn_cutting = epics.caget('SIOC:SYS0:ML00:CALC252')*1e-12 # pC -> C\n bc1_offset=epics.caget('BMLN:LI21:235:MOTR')*1e-3\n bc2_offset=epics.caget('BMLN:LI24:805:MOTR')*1e-3\n \n bc1_current=epics.caget('SIOC:SYS0:ML00:AO485')\n bc2_current=epics.caget('SIOC:SYS0:ML00:AO195')\n \n # Catch bad settings\n if bc1_current==0:\n print('Warning: BC1 current is zero!')\n bc1_sigma_z = 0\n else:\n # Assumes parabolic distribution\n bc1_sigma_z = q_after_horn_cutting*299792458 / sqrt(10) / bc1_current\n\n if bc2_current==0:\n print('Warning: BC1 current is zero!')\n bc2_sigma_z = 0\n else:\n # Assumes Gaussian distribution\n bc2_sigma_z = q_after_horn_cutting*299792458 / sqrt(12) / bc2_current \n \n lines = []\n lines.append('set dat BC1.energy[1]|meas = '+str(bc1_e0))\n lines.append('set dat BC2.energy[1]|meas = '+str(bc2_e0))\n lines.append('set dat L3.energy[2]|meas = '+str(l3_e0))\n lines.append('set dat BC1.offset[1]|meas = '+str(bc1_offset))\n lines.append('set dat BC2.offset[1]|meas = '+str(bc2_offset))\n \n lines.append(f'! Charge after horn cutting: {q_after_horn_cutting*1e12:10.4} pC')\n lines.append(f'! For BC1 current {bc1_current} A')\n lines.append('set dat BC1.beam[1]|meas = '+str( bc1_sigma_z))\n lines.append(f'! For BC2 current {bc2_current} A')\n lines.append('set dat BC2.beam[1]|meas = '+str( bc2_sigma_z)) \n\n return lines",
"def writelines(self, lines):\n for line in lines:\n self.write(line)",
"def writelines(self, seq):\n for line in seq:\n self.write(line)",
"def writelines(self, seq: list[str]) -> None:\n ...",
"def write_output(self,content):\n text=\"\"\"# typ eta phi pt jmass ntrk btag had/em dummy dummy\\n\"\"\"\n self.output.writelines(text)\n text=\"0 \"+str(self.nb_data)+\" \"+str(len(content))+\"\\n\"\n self.output.writelines(text)\n\n i=1\n for particle in content:\n text=str(i)+' '+particle.lhco_line()+'\\n'\n self.output.writelines(text)\n i+=1",
"def _write_to_file(self):\n with open(self.filename + \".asm\", \"w+\") as file:\n file.writelines(\n [\"\\n\" + l if p != 0 else l for p, l in enumerate(self.lines)]\n )",
"async def writelines(self, lines):\n # first check if the file is binary or not\n if 'b' in self._mode:\n raise APIException(\n \"writelines on a binary file is not permitted: {}\".format(\n self._uri)\n )\n # write all but the last line with a line break\n for l in lines:\n await self.write((l+\"\\n\").encode('utf-8'))\n return True",
"def write_lines(list_of_lines, file):\r\n for i in range(0, len(list_of_lines)):\r\n file.write(list_of_lines[i] + b\"\\n\")",
"def writelines(lines, filename, encoding='utf-8', mode='wb'):\r\n return write(os.linesep.join(lines), filename, encoding, mode)",
"def write2lines(myItrb, out_fn):\n with open(out_fn, 'w') as writer:\n for item in myItrb:\n writer.write(str(item)+'\\n')",
"def _write_to_file(self):\n with open(self.filename + \".ir\", \"w+\") as file:\n file.writelines(\n [\"\\n\" + l if p != 0 else l for p, l in enumerate(self.lines)]\n )",
"def writeData(self, lines, fpath):\n with open(fpath, 'w') as f:\n for line in lines:\n print(line, file=f)",
"def write_to(channel, lines):\n for s in lines:\n channel.write(s)\n channel.write('\\n')",
"def write(self,aFile,lines):\n # Not necessary (comment older than 021 - no idea what does that mean)\n # Maybe meant to be obsoleted by writeLine and writeLog\n self.debug.printHeader()\n for line in lines:\n if not hasattr(line,'upper'): line=self.settings.pathStorage.composeURL(line)\n # Really poor way how differ between string and list\n # Should be rewriten. Lines could contain only array of strings (not array of arrays).\n aFile.write(line)\n aFile.write('\\n')",
"def lines_to_file(file_name: str, write_dir: str, lines: Sequence[str]):\n with open(os.path.join(write_dir, file_name), \"w\", encoding=\"utf-8\") as f:\n for l in lines:\n f.write(f\"{l}\\n\")",
"def write_lines(filename, lines, verbose=True):\n with open(filename, 'w', encoding=\"utf-8\") as fp:\n for line in lines:\n print(line, file=fp)\n if verbose:\n print(\"Done writing to file %s.\" % filename)",
"def write_lines_to_file(filename, lines):\n with open(filename, 'w') as fp:\n for line in lines:\n fp.write(\"%s\\n\" % line.strip('\\n'))",
"def write(afile, seqs): \n for s in seqs :\n writeseq(afile, s)",
"def sheetbend(exe, hklin, pdbin, pdbout, ncyc, logfile):\n\n mtz_labels = mtz_util.GetLabels(hklin)\n colin = \"{0},{1}\".format(mtz_labels.f, mtz_labels.sigf)\n\n cmd = [exe, \"--pdbin\", pdbin, \"--mtzin\", hklin, \"--pdbout\", pdbout, \"--colin-fo\", colin, \"-cycles\", str(ncyc), \"-resolution-by-cycle\", \"6,3\"]\n stdout = cexec(cmd)\n with open(logfile, \"w\") as f_out:\n f_out.write(stdout)",
"def print_to_file(list_of_lines, file_path):\r\n with open(file_path) as output_file:\r\n write_lines(list_of_lines, output_file)",
"def writeBlade(self):\n\n ofname = self.blade1_file ### note, assuming they're all the same\n ofh = open(ofname,'w')\n\n for line in self.lines_blade:\n ofh.write(line)\n ofh.close()",
"def writeMultipleFileLines(self, filePaths, liness): \n \n for i,filePath in enumerate(filePaths): \n self.writeSingleFileLines(filePath,liness[i])",
"def _write_endcy():\n return []",
"def writeEcMaps( self ):\n\n self.logger.info( 'writeEcMaps: START' )\n\n self.logger.info( 'writeEcMaps: insert file will be ecMapsInsert.psql' )\n\n ecMapsFile = self.openInsertFile( 'ecMapsInsert.psql' )\n\n self.logger.info( 'writeEcMaps: keggreader.getEcMaps(): START' )\n\n ecMaps = self.reader.getEcMaps()\n\n self.logger.info( 'writeEcMaps: keggreader.getEcMaps(): START' )\n\n for ec,mapNumbers in ecMaps.iteritems():\n ecId = self.importerEc.ecsInserted[ ec ]\n \n for mapNumber in mapNumbers:\n\n if mapNumber in self.importerPathway.pathwayMapsInserted:\n\n mapId = self.importerPathway.pathwayMapsInserted[ mapNumber ]\n\n #self.writeEcMapsFile( ecMapsFile, ecId, mapId )\n self.writeFile( ecMapsFile, 'ec_maps', [ str(ecId), str(mapId) ] )\n\n self.logger.info( 'writeEcMaps: DONE' )",
"def writeOut(self):\n # import time\n self.outHeader = self.srcHeader\n for line in self.outHeader:\n self.outFile.write(line + '\\n')\n # now = time.asctime(time.localtime(time.time()))\n # self.outFile.write('%% -- %s -- Written to new alog' % now)\n for time_s in sorted(self.outData):\n for sens in self.outData[time_s]:\n for meas in self.outData[time_s][sens]:\n valu = self.outData[time_s][sens][meas]\n msg_list = [str(time_s), meas, sens, str(valu)]\n line_string = reconstructLine(msg_list)\n self.outFile.write(line_string + '\\n')",
"def save_aligned_BFE(*args):\r\n\r\n try:\r\n global bambara_bfe\r\n global francais_bfe\r\n global english_bfe\r\n bambara_bfe.append(lines_bam[line_no_1])\r\n francais_bfe.append(lines_fr[line_no_2])\r\n english_bfe.append(lines_en[line_no_3])\r\n except ValueError:\r\n pass",
"def _file_writer(self, lines, filename):\n if self.MockRun:\n return\n\n if self.Verbose:\n print \"Writing file %s\" % filename\n\n updated_file = open(filename, 'w')\n updated_file.write(''.join(lines))\n updated_file.close()",
"def write(self, lines):\n strip_lines = lines.strip()\n if strip_lines:\n for line in strip_lines.split('\\n'):\n self._add(line.strip())"
] | [
"0.6657263",
"0.6484899",
"0.5828503",
"0.5651945",
"0.5593861",
"0.5564902",
"0.54197687",
"0.5337634",
"0.5288988",
"0.5269306",
"0.52646947",
"0.52209884",
"0.5204748",
"0.51610637",
"0.5106298",
"0.5046272",
"0.5036513",
"0.5005771",
"0.49783012",
"0.49764898",
"0.49703205",
"0.49586713",
"0.4950611",
"0.49445367",
"0.49172002",
"0.49154788",
"0.4909088",
"0.48975632",
"0.4897193",
"0.48945704"
] | 0.7545118 | 0 |
Writes tao LEM lines to a .tao file. Requires epics (or proxy object). | def write_tao_BC_and_LEM_lines(filePath='LEM_settings.tao', epics=None, verbose=False):
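# Mirrors the Bmad writer above: regenerate the Tao lines, overwrite filePath, and return them for reuse.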
lines = tao_BC_and_LEM_lines(epics)
with open(filePath, 'w') as f:
for l in lines:
f.write(l+'\n')
if verbose:
print('Written:', filePath)
return lines | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def format_tep_file_lines(otu_table_data, mapping_lines, tree_lines,\r\n prefs_dict):\r\n\r\n # write tree file lines\r\n lines = ['>>tre\\n']\r\n lines += [tree_lines.read()]\r\n lines += '\\n'\r\n\r\n # get otu table data\r\n if(otu_table_data.ObservationMetadata):\r\n lines += ['>>otm\\n#OTU ID\\tOTU Metadata\\n']\r\n for i in range(len(otu_table_data.ObservationIds)):\r\n new_string = otu_table_data.ObservationIds[i] + '\\t'\r\n for m in otu_table_data.ObservationMetadata[i]['taxonomy']:\r\n new_string += m + ';'\r\n lines += [new_string]\r\n lines += '\\n'\r\n\r\n # format and write otu table and taxonomy lines\r\n lines += ['>>osm\\n']\r\n if otu_table_data.ObservationMetadata is None:\r\n lines += [str(otu_table_data.delimitedSelf())]\r\n elif \"taxonomy\" in otu_table_data.ObservationMetadata[0]:\r\n lines += [str(otu_table_data.delimitedSelf(header_key=\"taxonomy\",\r\n header_value=\"Consensus Lineage\",\r\n metadata_formatter=lambda x: ';'.join(x)))]\r\n\r\n # write mapping file lines\r\n lines += ['\\n>>sam\\n']\r\n lines += mapping_lines.readlines()\r\n\r\n # if prefs file supplied, write pref lines\r\n if prefs_dict:\r\n te_prefs = format_te_prefs(prefs_dict)\r\n lines += ['\\n>>pre\\n']\r\n lines += te_prefs\r\n\r\n return lines",
"def createTOFin(En):\n ftemplate = open(\"TOFtemplate.in\", \"r\")\n lines = ftemplate.readlines()\n ftofin = open(\"TOF.in\", \"w\") \n energyline = lines[12].split()\n lines[12] = \"%s %g %s\\n\"%(energyline[0], En, energyline[2])\n ftofin.writelines(lines)\n ftemplate.close()\n ftofin.close()",
"def write_bmad_linac_phasing_lines(filePath='linac_settings.bmad', epics=None, verbose=False):\n lines = bmad_linac_phasing_lines(epics)\n with open(filePath, 'w') as f:\n for l in lines:\n f.write(l+'\\n')\n if verbose:\n print('Written:', filePath)",
"def tao_BC_and_LEM_lines(epics):\n bc1_e0=epics.caget('SIOC:SYS0:ML00:AO483')*1e6\n bc2_e0=epics.caget('SIOC:SYS0:ML00:AO489')*1e9\n l3_e0 =epics.caget('SIOC:SYS0:ML00:AO500')*1e9\n \n # Charge in LTU\n q_after_horn_cutting = epics.caget('SIOC:SYS0:ML00:CALC252')*1e-12 # pC -> C\n bc1_offset=epics.caget('BMLN:LI21:235:MOTR')*1e-3\n bc2_offset=epics.caget('BMLN:LI24:805:MOTR')*1e-3\n \n bc1_current=epics.caget('SIOC:SYS0:ML00:AO485')\n bc2_current=epics.caget('SIOC:SYS0:ML00:AO195')\n \n # Catch bad settings\n if bc1_current==0:\n print('Warning: BC1 current is zero!')\n bc1_sigma_z = 0\n else:\n # Assumes parabolic distribution\n bc1_sigma_z = q_after_horn_cutting*299792458 / sqrt(10) / bc1_current\n\n if bc2_current==0:\n print('Warning: BC1 current is zero!')\n bc2_sigma_z = 0\n else:\n # Assumes Gaussian distribution\n bc2_sigma_z = q_after_horn_cutting*299792458 / sqrt(12) / bc2_current \n \n lines = []\n lines.append('set dat BC1.energy[1]|meas = '+str(bc1_e0))\n lines.append('set dat BC2.energy[1]|meas = '+str(bc2_e0))\n lines.append('set dat L3.energy[2]|meas = '+str(l3_e0))\n lines.append('set dat BC1.offset[1]|meas = '+str(bc1_offset))\n lines.append('set dat BC2.offset[1]|meas = '+str(bc2_offset))\n \n lines.append(f'! Charge after horn cutting: {q_after_horn_cutting*1e12:10.4} pC')\n lines.append(f'! For BC1 current {bc1_current} A')\n lines.append('set dat BC1.beam[1]|meas = '+str( bc1_sigma_z))\n lines.append(f'! For BC2 current {bc2_current} A')\n lines.append('set dat BC2.beam[1]|meas = '+str( bc2_sigma_z)) \n\n return lines",
"def lines_to_file(file_name: str, write_dir: str, lines: Sequence[str]):\n with open(os.path.join(write_dir, file_name), \"w\", encoding=\"utf-8\") as f:\n for l in lines:\n f.write(f\"{l}\\n\")",
"def save_to_file(self, tojuliet):\n if self.lc.time[0] < 1e4:\n self.lc.time += 2457000\n ascii.write([self.lc.time, self.lc.flux, self.lc.flux_err], 'TIC%d.dat' % self.TIC,\n format='fixed_width_no_header', delimiter=' ', overwrite=True)\n if tojuliet:\n ascii.write([self.lc.time, self.lc.flux, self.lc.flux_err,\n ['TESS' for _ in self.lc.time]], 'TIC%d_juliet.dat' % self.TIC,\n format='fixed_width_no_header', delimiter=' ', overwrite=True)",
"def save_to_MTFIT_style_file(MTs, MTp, nlloc_hyp_filename, inversion_type, outdir, MTp_absolute=[], shift_idxs=[]):\n # Get uid and stations data:\n uid, stations = get_event_uid_and_station_data_MTFIT_FORMAT_from_nonlinloc_hyp_file(nlloc_hyp_filename)\n # Write all data to output dict:\n out_dict = {}\n out_dict[\"MTs\"] = MTs\n out_dict[\"MTp\"] = MTp\n out_dict[\"uid\"] = uid\n out_dict[\"stations\"] = stations\n if len(MTp_absolute)>0:\n out_dict[\"MTp_absolute\"] = MTp_absolute\n if len(shift_idxs)>0:\n out_dict[\"shift_idxs\"] = shift_idxs\n # And save to file:\n out_fname = outdir+\"/\"+uid+\"_FW_\"+inversion_type+\".pkl\"\n print(\"Saving FW inversion to file:\", out_fname)\n pickle.dump(out_dict, open(out_fname, \"wb\"))",
"def write_traces(obj, arc, outfile):\n tdict = dict(obj=obj, arc=arc)\n jdict = ltu.jsonify(tdict)\n # Write\n ltu.savejson(outfile, jdict, easy_to_read=True, overwrite=True)\n print(\"Wrote Traces to {:s}\",outfile)",
"def write_trajectory(self, environmnent, pdb_filename):\n # TODO\n pass",
"def writelines(self, lines):\n for line in lines:\n self.write(line)",
"def save_rollout_to_file(self, episode):\n # get save path\n save_path = os.path.join(self.save_dir, \"rollout_{}.h5\".format(self.counter))\n\n # save rollout to file\n f = h5py.File(save_path, \"w\")\n f.create_dataset(\"traj_per_file\", data=1)\n\n # store trajectory info in traj0 group\n traj_data = f.create_group(\"traj0\")\n traj_data.create_dataset(\"states\", data=np.array(episode.observation))\n traj_data.create_dataset(\"images\", data=np.array(episode.image, dtype=np.uint8))\n traj_data.create_dataset(\"actions\", data=np.array(episode.action))\n\n terminals = np.array(episode.done)\n if np.sum(terminals) == 0:\n terminals[-1] = True\n\n # build pad-mask that indicates how long sequence is\n is_terminal_idxs = np.nonzero(terminals)[0]\n pad_mask = np.zeros((len(terminals),))\n pad_mask[:is_terminal_idxs[0]] = 1.\n traj_data.create_dataset(\"pad_mask\", data=pad_mask)\n\n f.close()\n\n self.counter += 1",
"def write(self, outputFile):\n \n try: \n f = open(outputFile + '.py', 'w')\n for trail in self.trails: \n f.write(\"[\")\n for index in trail:\n f.write(\"({0}, {1}), \".format(*index)) \n f.write(\"]\\n\")\n \n except IOError, e:\n msg = \"Exception encountered when attempting \" + \\\n \"to write data to file: {0}.\" + \\\n \"\\n\\t -- Exception was: {1}\" + \\\n \"\\n\\t For help use --help\".format(outputFile, e)\n raise Usage(e)",
"def write_telluric_transmission_to_file(wls,T,outpath):\n import pickle\n print('------Saving teluric transmission to '+outpath)\n with open(outpath, 'wb') as f: pickle.dump((wls,T),f)",
"def write_body(self):\r\n if self.arguments['--out']:\r\n self.file = open(self.arguments['--out'], \"a+\")\r\n for list_item in self.list_of_body_objects:\r\n self.file.write(list_item.line)\r\n self.file.close()\r\n else:\r\n for list_item in self.list_of_body_objects:\r\n print(list_item.line)",
"def write_file(poet, info_dict):\r\n\r\n filename = SAVE_PATH + '/' + poet + '/' + str(info_dict['id']) + '_'+ str(info_dict['pagenum']) \\\r\n + '_' + info_dict['id2'] +'_' + info_dict['ord2'] \\\r\n + '_' + info_dict['id3'] + '_' + info_dict['ord3'] \\\r\n + '_' + info_dict['id4'] + '_' + info_dict['ord4'] + '.txt'\r\n\r\n print(filename)\r\n with open(filename, 'w', encoding='utf-16') as f:\r\n txt = ','.join([str(info_dict[k]) for k in KEYS ])\r\n txt = txt + '\\n' + '\\n'.join([x for x in info_dict['beyts']])\r\n f.write(txt)\r\n\r\n\r\n locale.setlocale(locale.LC_ALL, '')\r\n DELIMITER = ';'# if locale.localeconv()['decimal_point'] == ',' else ','\r\n\r\n list_of_lists = [[info_dict[k] for k in KEYS]]\r\n with open('D:/poem/molana.csv', 'a', newline='', encoding='utf-16') as csvfile:\r\n\r\n writer = csv.writer(csvfile, delimiter=DELIMITER)\r\n writer.writerows(list_of_lists)",
"def write_file(l_dta, outputfile):\n l_dta2 = []\n for row in l_dta:\n s = '\\t'.join(row)\n l_dta2.append(s)\n s_dta = \"\\r\\n\".join(l_dta2)\n try:\n with open(outputfile, 'w') as fd:\n fd.write(s_dta)\n except (IOError,) as e:\n tracker()\n return None",
"def test_file_write_attributes_for_each(self):\n\n with OrthoMultiTs(self.testfilename, n_loc=3, mode=\"w\") as dataset:\n n_data = 5\n locations = np.array([1, 2, 3])\n data = {\n \"test\": np.arange(n_data * 3).reshape(3, n_data),\n \"test2\": np.arange(n_data * 3).reshape(3, n_data)\n }\n base = datetime(2007, 1, n_data)\n dates = np.array(\n [base + timedelta(hours=i) for i in range(n_data)])\n descriptions = np.repeat([str(\"station\")], 3).tolist()\n\n dataset.write_all(locations,\n data,\n dates,\n loc_descrs=descriptions,\n lons=np.arange(3),\n lats=np.arange(3),\n alts=np.arange(3),\n attributes={\n \"test\": {\n \"testattribute\": \"teststring\"\n },\n \"test2\": {\n \"testattribute2\": \"teststring2\"\n }\n })\n\n with OrthoMultiTs(self.testfilename) as dataset:\n data = dataset.read_all(2)\n nptest.assert_array_equal(data[\"test\"], np.arange(5) + 5)\n assert dataset.dataset.variables[\n \"test\"].testattribute == \"teststring\"\n assert dataset.dataset.variables[\n \"test2\"].testattribute2 == \"teststring2\"\n test_dates = []\n for n_data in [5]:\n base = datetime(2007, 1, n_data)\n test_dates.append(\n np.array(\n [base + timedelta(hours=i) for i in range(n_data)]))\n dates = np.concatenate(test_dates)\n nptest.assert_array_equal(data[\"time\"], dates)",
"def addTrailer(file):\n program = '\\t\\t</coordinates>\\n'\n program += '\\t</LineString>\\n'\n program += '\\t</Placemark>\\n'\n program += '</Document>\\n'\n program += '</kml>\\n'\n file.write(program)",
"def create_telemetry_file():\n loginfo(\"Creating telem file if it doesn't exist...\")\n with open(HAB_TELEM_FILE, \"w\"):\n pass",
"def write_output(self,content):\n text=\"\"\"# typ eta phi pt jmass ntrk btag had/em dummy dummy\\n\"\"\"\n self.output.writelines(text)\n text=\"0 \"+str(self.nb_data)+\" \"+str(len(content))+\"\\n\"\n self.output.writelines(text)\n\n i=1\n for particle in content:\n text=str(i)+' '+particle.lhco_line()+'\\n'\n self.output.writelines(text)\n i+=1",
"def write_to_file(self,\n ofile=\"output.txt\",\n **kwargs):\n with open(file=ofile, mode='a') as ofile:\n for num_line, obj in self.items():\n ofile.write(str(self._construct_output_string(num_line=num_line,\n obj=obj,\n **kwargs)))",
"def write_novel_alleles(alleles, novel):\n alleles_dict = {mistutils.basename(x):x for x in os.listdir(alleles)}\n\n for gene in novel:\n\n filename = os.path.join(alleles, alleles_dict[gene])\n\n with open(filename, 'a') as f:\n for allele in novel[gene]:\n f.write('>placeholder\\n')\n f.write(allele + \"\\n\")\n\n fix_headers(filename)",
"def write_tsv(labels, positions, elec_file):\n labels = labels.reshape(-1, order='F')\n positions = positions.reshape(-1, 3, order='F')\n\n elec_file = elec_file.with_suffix('.tsv')\n with elec_file.open('w') as f:\n f.write('name\\tx\\ty\\tz\\n')\n for i in range(labels.shape[0]):\n f.write(f'{labels[i]}\\t{positions[i, 0]:.3f}\\t{positions[i, 1]:.3f}\\t{positions[i, 2]:.3f}\\n')",
"def save(filename, points3, tris, metadata):\n logging.info(\"saving mesh: %s\"%filename)\n cells = {'triangle':tris}\n vtk_io.write(filename, points3, cells)\n with open(filename+'.readme','w') as fid:\n fid.write(metadata)",
"def write_obj(output_file_name, obj_name, mtl_lib_file, tex_lines,\n tex_map, n_verts, vertex_lines, n_normals,\n normals_lines, n_faces, faces_groups):\n\n def _join(lns):\n \"\"\"Joins lines.\n :lns: list of strings: Lines to join.\n Returns joined lines as string.\n \"\"\"\n return \"\\n\".join(lns)\n\n # Rebuild the faces data first.\n faces = \"\"\n for idx, lines in faces_groups.items():\n # Get the texture 'alias' or use a default value\n tex_name = _get_tex_name(tex_map, idx)\n faces += FACES_TEMPLATE.format(obj_name=obj_name, tex_name=tex_name,\n faces_lines=_join(lines))\n\n # 'Apply' data to the template.\n with open(output_file_name, \"w\") as fd_out:\n fd_out.write(OBJ_TEMPLATE.format(header=COMMON_HEADER,\n mtl_lib_file=mtl_lib_file,\n obj_name=obj_name,\n n_verts=n_verts,\n n_faces=n_faces,\n n_norms=n_normals,\n vertex_lines=_join(vertex_lines),\n tex_lines=_join(tex_lines),\n norms_lines=_join(normals_lines),\n faces_lines=faces))\n print \" * Saved '%s'.\" % output_file_name",
"def write_uem(uemf, uem, n_digits=3):\n with open(uemf, 'wb') as f:\n for file_id in sorted(iterkeys(uem)):\n for onset, offset in sorted(uem[file_id]):\n line = ' '.join([file_id,\n '1',\n format_float(onset, n_digits),\n format_float(offset, n_digits)\n ])\n f.write(line.encode('utf-8'))\n f.write(b'\\n')",
"def sauvegarder():\n\n fic = open(\"sauvegarde.txt\", \"w\")\n\n for i in range(Nombre_de_colonne):\n\n for j in range(Nombre_de_ligne):\n\n fic.write(str(etat[i][j]) + \"\\n\")\n\n fic.close()",
"def export_file_dto(self, active_model, objs=[], type=''):\n dto_parser = DtoParser()\n objs2 = []\n for obj in objs:\n objs2 += dto_parser.parseJointPromotion(obj)\n\n doc_type_obj = self.env[\"edi.doc.type\"]\n doc_obj = self.env[\"edi.doc\"]\n doc_type = doc_type_obj.search([(\"code\", '=', \"dto\")])[0]\n last_dto_file = doc_obj.search([(\"doc_type\", '=', doc_type.id)],\n order=\"date desc\", limit=1)\n if last_dto_file:\n count = last_dto_file.count + 1\n else:\n count = 1\n\n tmp_name = \"export_dto.txt\"\n file_len = len(objs2)\n filename = \"%sDTO%s.%s\" % (self.env.user.company_id.frigo_code,\n str(file_len).zfill(4),\n str(count).zfill(4))\n templates_path = self.addons_path('frigo_edi') + os.sep + 'wizard' + \\\n os.sep + 'templates' + os.sep\n mylookup = TemplateLookup(input_encoding='utf-8',\n output_encoding='utf-8',\n encoding_errors='replace')\n tmp = Template(filename=templates_path + tmp_name,\n lookup=mylookup, default_filters=['decode.utf8'])\n\n doc = tmp.render_unicode(o=objs2, type_=type, datetime=datetime,\n user=self.env.user).encode('utf-8', 'replace')\n file_name = self[0].service_id.output_path + os.sep + filename\n f = file(file_name, 'w')\n f.write(doc)\n f.close()\n file_obj = self.create_doc(filename, file_name, doc_type)\n file_obj.count = count",
"def newtwogfile(ntf_twogs):\n outfile = open(\"Twogs.txt\", \"w\")\n for x in ntf_twogs:\n outfile.write(\"%s\\n\" % x)\n outfile.close()",
"def saveenergyfile(path, meta, data):\n def serializemeta(meta):\n \"\"\"Convert metadata object to list of comment strings\"\"\"\n return [u\"#CTE_%s: %s\" % (key, meta[key]) for key in meta]\n\n with io.open(path, 'w+') as ff:\n ff.write(u\"\\n\".join(serializemeta(meta)))\n ff.write(u\"\\nvector,tipo,src_dst\\n\")\n for c in data:\n carrier = c['carrier']\n ctype = c['ctype']\n originoruse = c['originoruse']\n values = u\", \".join(u\"%.2f\" % v for v in c['values'])\n comment = u\" # %s\" % c['comment'] if c['comment'] else u\"\"\n ff.write(u\"%s, %s, %s, %s%s\\n\" % (carrier, ctype, originoruse, values, comment))"
] | [
"0.5752286",
"0.5749262",
"0.5700694",
"0.5613532",
"0.5605334",
"0.556102",
"0.54216707",
"0.5421532",
"0.5405027",
"0.53978723",
"0.53960645",
"0.53954494",
"0.5390986",
"0.5312852",
"0.5277949",
"0.5253673",
"0.5245013",
"0.5244172",
"0.52176124",
"0.5176581",
"0.5172888",
"0.51666254",
"0.5165132",
"0.5162644",
"0.51584023",
"0.5151599",
"0.51265514",
"0.51262856",
"0.5120784",
"0.5108108"
] | 0.7485731 | 0 |
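
Note: the document field of the row above is visible only as its tail (a write-lines-to-file helper with a verbose flag). A minimal, self-contained sketch of that same pattern; `file_path`, `lines`, and `verbose` are illustrative names chosen to mirror the snippet, not part of the dataset:

import os

def write_lines(file_path, lines, verbose=False):
    """Write each string in `lines` to file_path, one per line, and return them."""
    with open(file_path, 'w') as f:
        for l in lines:
            f.write(l + '\n')
    if verbose:
        print('Written:', file_path)
    return lines
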
Method to get the credentials from ~/.mofplusrc | def credentials_from_rc(self):
mprc_filename = os.environ["HOME"]+'/.mofplusrc'
with open(mprc_filename, 'r') as mprc:
username = mprc.readline().split()[0]
pw = mprc.readline().split()[0]
return username, pw | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_credentials():\n #home_dir = os.path.expanduser('~')\n home_dir = os.path.expanduser('/home/pi/')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'gmail-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials(self):\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir, self.CRED_FILENAME)\r\n \r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials(self):\n home_dir = os.path.expanduser(\"~\")\n credential_dir = os.path.join(home_dir, \".credentials\")\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, \"autoto.json\")\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, self.auth_flags)\n print(\"Storing credentials to \" + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser(os.getcwd())\n credential_dir = os.path.join(home_dir, '.credentials')\n print(credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n # normal, sane way of doing this that really shouldn't be changed\n #home_dir = os.path.expanduser('~')\n #credential_dir = os.path.join(home_dir, '.credentials')\n #if not os.path.exists(credential_dir):\n # os.makedirs(credential_dir)\n #credential_path = os.path.join(credential_dir,'calendar-python-quickstart.json')\n\n # stupid hacky way that I came up with to fix an issue with running this app as root\n credential_path = os.path.join('./credentials','calendar-python-quickstart.json') \n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(config['client secret file'], SCOPES)\n flow.user_agent = APPLICATION_NAME\n if args:\n credentials = tools.run_flow(flow, store, args)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'calendar-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'bis-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'reseller-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'credentialv_modify.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'drive-python-quickstart.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'thejam_calendar.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def credentials():\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = (os.environ.get('OS_TENANT_NAME') or\n os.environ.get('OS_PROJECT_NAME'))\n auth_url = os.environ.get('OS_AUTH_URL')\n\n config = configparser.RawConfigParser()\n if config.read(_CREDS_FILE):\n username = username or config.get('admin', 'user')\n password = password or config.get('admin', 'pass')\n tenant_name = tenant_name or config.get('admin', 'tenant')\n auth_url = auth_url or config.get('auth', 'uri')\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'uri': auth_url\n }",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sally.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'appsactivity-python-showtime.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n print('Storing credentials to ' + credential_path)\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials(self):\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'homework_logger-gmail-api.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\n flow.user_agent = self.APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'gmail-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'grader.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, tools.argparser.parse_args(args=[]))\n print('Storing credentials to ' + credential_path)\n return credentials"
] | [
"0.739547",
"0.7365536",
"0.73126113",
"0.73126113",
"0.7311609",
"0.7309989",
"0.7300369",
"0.7288538",
"0.72769576",
"0.72184205",
"0.71966267",
"0.71909285",
"0.71909285",
"0.71909285",
"0.71909285",
"0.71909285",
"0.7185963",
"0.71790165",
"0.71647274",
"0.71396244",
"0.71333355",
"0.7099693",
"0.709013",
"0.7079833",
"0.706036",
"0.7058538",
"0.7058538",
"0.7058538",
"0.7058538",
"0.70563006"
] | 0.857187 | 0 |
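
Note: the `credentials_from_rc` document in the row above reads a two-line rc file (username on the first line, password on the second). A hedged, self-contained sketch of the same convention; the `~/.mofplusrc` path and line layout come from the document itself, while the optional `rc_path` parameter and `expanduser` call are illustrative additions:

import os

def credentials_from_rc(rc_path=None):
    """Read (username, password) from a two-line rc file such as ~/.mofplusrc."""
    # expanduser('~') is a portable stand-in for os.environ["HOME"] in the original
    rc_path = rc_path or os.path.join(os.path.expanduser('~'), '.mofplusrc')
    with open(rc_path, 'r') as rc:
        username = rc.readline().split()[0]
        password = rc.readline().split()[0]
    return username, password
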
Method to get the credentials from the command line | def credentials_from_cmd(self):
username = raw_input("Email:")
pw = getpass.getpass()
return username, pw | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _config_credentials_get():\n user = input(\"username:\")\n password = getpass.getpass()\n url = input(\"url:\")\n return user, password, url",
"def get_credentials(options, environment):\n if options[\"--username\"] or options[\"--auth\"]:\n if not options[\"--username\"]:\n options[\"<username>\"] = lib.prompt(\n \"Please enter the username for %s...\" % environment\n )\n if not options[\"--password\"]:\n options[\"<password>\"] = lib.prompt(\n \"Please enter the password for %s...\" % environment, secret=True\n )\n return options",
"def get_credentials(commandline_flags=None):\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'drive-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if commandline_flags:\n credentials = tools.run_flow(flow, store, commandline_flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n username = input(\"Username: \")\n password = getpass.getpass(prompt='Password: ')\n return username, password",
"def get_creds_from_args(args):\n if args.prefs:\n (jamf_url, jamf_user, jamf_password) = get_credentials(args.prefs)\n else:\n jamf_url = \"\"\n jamf_user = \"\"\n jamf_password = \"\"\n\n # CLI arguments override any values from a prefs file\n if args.url:\n jamf_url = args.url\n elif not jamf_url:\n jamf_url = input(\"Enter Jamf Pro Server URL : \")\n if args.user:\n jamf_user = args.user\n elif not jamf_user:\n jamf_user = input(\n \"Enter a Jamf Pro user with API rights to upload a package : \"\n )\n if args.password:\n jamf_password = args.password\n elif not jamf_password:\n jamf_password = getpass.getpass(\n \"Enter the password for '{}' : \".format(jamf_user)\n )\n\n # encode the username and password into a basic auth b64 encoded string so that we can get the session token\n enc_creds = encode_creds(jamf_user, jamf_password)\n\n return jamf_url, jamf_user, jamf_password, enc_creds",
"def get_args():\n parser = build_arg_parser()\n\n args = parser.parse_args()\n\n return prompt_for_password(args)",
"def GetUserCredentials():\n email = options.email\n if email is None:\n email = GetEmail(\"Email (login for uploading to %s)\" % options.server)\n password = getpass.getpass(\"Password for %s: \" % email)\n return (email, password)",
"def get_credentials():\n try:\n import argparse\n flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\n except ImportError:\n flags = None\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'appsactivity-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(GoogleGsuiteAPI.CLIENT_SECRET_FILE, GoogleGsuiteAPI.SCOPES)\n flow.user_agent = GoogleGsuiteAPI.APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def getconfig(self):\n self.cmdargs.parse_args(self.args)\n config = self._getconfig(self.sources)\n\n if self.needlogin:\n config.credentials = { \n k: getattr(config, self.credentialKey[k].name)\n for k in self.authenticatorInfo.getCredentialKeys(config.auth)\n }\n\n config._freeze_varnames()\n return (self.client, config)",
"def _get_credentials(flags):\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'drive-python-visualizerhelptext.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def credentials():\n\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant_name = (os.environ.get('OS_TENANT_NAME') or\n os.environ.get('OS_PROJECT_NAME'))\n auth_url = os.environ.get('OS_AUTH_URL')\n\n config = configparser.RawConfigParser()\n if config.read(_CREDS_FILE):\n username = username or config.get('admin', 'user')\n password = password or config.get('admin', 'pass')\n tenant_name = tenant_name or config.get('admin', 'tenant')\n auth_url = auth_url or config.get('auth', 'uri')\n\n return {\n 'username': username,\n 'password': password,\n 'tenant_name': tenant_name,\n 'uri': auth_url\n }",
"def get_args(self):\n args = self._parser.parse_args()\n return self._prompt_for_password(args)",
"def get_credentials(self):\r\n \r\n try:\r\n import argparse\r\n #flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\r\n if self.noauth == True:\r\n flags = tools.argparser.parse_args(args=['--noauth_local_webserver'])\r\n else:\r\n flags = tools.argparser.parse_args(args=[])\r\n except ImportError:\r\n flags = None \r\n \r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'sheets.googleapis.com-allstarbot.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n secret = Path(self.CLIENT_SECRET_FILE)\r\n if secret.exists():\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n else:\r\n print(\"client_secret.json not found, using env vars\")\r\n if not os.environ.get('client_id') or not os.environ.get('client_secret'): \r\n print(\"env vars client_id and client_secret not found. canceling\")\r\n raise Exception(\"client secret error\")\r\n else:\r\n flow = OAuth2WebServerFlow(\r\n os.environ.get('client_id'),\r\n os.environ.get('client_secret'),\r\n self.SCOPES) \r\n \r\n flow.params['access_type'] = 'offline'\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_credentials( flags=None ):\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'drive-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def getcreds():\n global user\n global password\n if not user:\n user = input(\"Please enter your username:\\n\")\n if not password:\n password = getpass.getpass(\"Please enter password:\\n\")",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'grader.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, tools.argparser.parse_args(args=[]))\n print('Storing credentials to ' + credential_path)\n return credentials",
"def credentials(self) -> Optional[pulumi.Input['CredentialsArgs']]:\n return pulumi.get(self, \"credentials\")",
"def get_credentials():\n # normal, sane way of doing this that really shouldn't be changed\n #home_dir = os.path.expanduser('~')\n #credential_dir = os.path.join(home_dir, '.credentials')\n #if not os.path.exists(credential_dir):\n # os.makedirs(credential_dir)\n #credential_path = os.path.join(credential_dir,'calendar-python-quickstart.json')\n\n # stupid hacky way that I came up with to fix an issue with running this app as root\n credential_path = os.path.join('./credentials','calendar-python-quickstart.json') \n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(config['client secret file'], SCOPES)\n flow.user_agent = APPLICATION_NAME\n if args:\n credentials = tools.run_flow(flow, store, args)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,\r\n 'bis-python-quickstart.json')\r\n\r\n store = oauth2client.file.Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_creds():\n with open(CREDS_PATH, 'r') as creds_file:\n creds = json.load(creds_file)\n return creds['uname'], creds['pword']",
"def get_credentials(self):\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir, self.CRED_FILENAME)\r\n \r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def get_credentials():\n\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'appsactivity-python-showtime.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n print('Storing credentials to ' + credential_path)\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n #home_dir = os.path.expanduser('~')\n home_dir = os.path.expanduser('/home/pi/')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, 'gmail-python-quickstart.json')\n\n store = oauth2client.file.Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser(os.getcwd())\n credential_dir = os.path.join(home_dir, '.credentials')\n print(credential_dir)\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'calendar-python-quickstart.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'drive-python-quickstart.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\r\n flow.user_agent = APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def _get_credentials(self):\n if self.config_file:\n with open(self.config_file) as f:\n config_str = f.read()\n credentials_dict = json.loads(config_str)\n self.credentials = credentials_dict[self.account][self.auth_type]\n else:\n self.credentials = {\n \"account\": os.environ.get('SNOWSQL_ACCOUNT'),\n \"user\": os.environ.get('SNOWSQL_USER'),\n \"password\": os.environ.get('SNOWSQL_PWD')\n }"
] | [
"0.7299473",
"0.7106037",
"0.6985046",
"0.6973925",
"0.6960516",
"0.6940761",
"0.6813968",
"0.67990017",
"0.6796827",
"0.6795659",
"0.6782839",
"0.67600983",
"0.6745715",
"0.6667258",
"0.6666305",
"0.66566455",
"0.662464",
"0.66160524",
"0.65844256",
"0.6557866",
"0.65372974",
"0.6520266",
"0.65107536",
"0.6488358",
"0.6486944",
"0.6486944",
"0.64834213",
"0.6466547",
"0.6453333",
"0.6443119"
] | 0.74783903 | 0 |
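
Note: the `credentials_from_cmd` document in the row above is Python 2 (`raw_input`). A Python 3 equivalent of the same prompt-and-getpass pattern, offered purely as an illustrative sketch:

import getpass

def credentials_from_cmd():
    """Prompt for an email on stdin and read the password without echoing it."""
    username = input("Email:")    # raw_input() in the original Python 2 code
    password = getpass.getpass()  # hidden password prompt from the stdlib
    return username, password
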
Prints the MFP banner | def print_banner(self):
print ":##::::'##::'#######::'########:::::::::::::::'###::::'########::'####:\n\
:###::'###:'##.... ##: ##.....::::'##::::::::'## ##::: ##.... ##:. ##::\n\
:####'####: ##:::: ##: ##::::::::: ##:::::::'##:. ##:: ##:::: ##:: ##::\n\
:## ### ##: ##:::: ##: ######:::'######::::'##:::. ##: ########::: ##::\n\
:##. #: ##: ##:::: ##: ##...::::.. ##.::::: #########: ##.....:::: ##::\n\
:##:.:: ##: ##:::: ##: ##::::::::: ##:::::: ##.... ##: ##::::::::: ##::\n\
:##:::: ##:. #######:: ##:::::::::..::::::: ##:::: ##: ##::::::::'####:\n\
:..:::::..:::.......:::..:::::::::::::::::::..:::::..::..:::::::::....:" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_banner(message):\n\n print(\"#############################################################################\")\n print(message)",
"def banner():\n print \"\"\" \n _____ __ \n |_ _|_ _ ___ / _| __ _ \n | |/ _` / __| |_ / _` |\n | | (_| \\__ \\ _| (_| |\n |_|\\__,_|___/_| \\__,_|\n \n \"\"\"\n print \"Welcome to use am-auto-start!\"\n print \"For more infomation --> www.tasfa.cn!!\"\n print \"<--------------------------------------------------->\"",
"def Banner():\n main_banner = pyfiglet.figlet_format(\" UTM NAT\", font = \"slant\")\n sub_banner1 = pyfiglet.figlet_format(\"tool\", font = \"isometric1\")\n sub_banner2 = \" -Generate a CSV file of Sophos UTM NAT statements-\"\n sub_banner3 = \" via REST API using the power of Python\"\n\n print()\n print('=' * 62)\n print(main_banner)\n print(sub_banner1)\n print()\n print(sub_banner2)\n print(sub_banner3)\n print()\n print('=' * 62)\n print()",
"def present_banner():\n writer(BANNER, FORMAT[\"BANNER\"])\n writer(\" \" * 30 + f\"version {VERSION}\")",
"def show_banner():\n print(\"\"\"\n _ _ _ _ _____ _______\n| | | | / \\ | | |_ _\\ \\ / / ____|\n| |_| | / _ \\ | | | | \\ \\ / /| _|\n| _ |/ ___ \\| |___ | | \\ V / | |___\n|_| |_/_/ \\_\\_____|___| \\_/ |_____|\n\n\nA super fast asynchronous http and https prober, to check who is (h)alive.\nDeveloped by gnc\n \"\"\")",
"def _print_banner(out_file, banner_text):\n banner_separator = \"\".ljust(len(banner_text), \"=\")\n\n out_file.write(\"\\n{}\\n{}\\n{}\\n\".format(\n banner_separator,\n banner_text,\n banner_separator))",
"def banner():\n print(\"\\033[32m\")\n print(\" ___ _ ___ _ _ _\")\n print(\" | _ )_ _ _ _| |_ ___ | __| |_ ___ _ _ _ _ __ _| | | | ___ ___ _ __\")\n print(\" | _ \\ '_| || | _/ -_) | _|| _/ -_) '_| ' \\/ _` | | | |__/ _ \\/ _ \\ '_ \\\\\")\n print(\" |___/_| \\_,_|\\__\\___| |___|\\__\\___|_| |_||_\\__,_|_|_|____\\___/\\___/ .__/\")\n print(\" |___| |_|\")\n print(\"\\033[0m\")",
"def print_banner(text):\n print(Figlet(font='smslant').renderText(text))",
"def print_banner(description):\n banner = len(description)\n if banner > 200:\n banner = 200\n\n # First banner\n print(\"\\n\")\n for _ in range(banner):\n print(\"*\", end=\"\")\n\n # Add description\n print(\"\\n%s\" % description)\n\n # Final banner\n for _ in range(banner):\n print(\"*\", end=\"\")\n print(\"\\n\")",
"def print_header(banner_name):\n print()\n print()\n print(\"----------------------------------------------------\")\n print(\" {0}\".format(banner_name))\n print(\"-----------------------------------------------------\")\n print()",
"def banner(message, border = '-'):\n line = border * len(message)\n print(line)\n print(message)\n print(line)",
"def banner_ascii():\n print(\"\")\n print(f\"\\n{RED} Steganography Tool{RESET}\")\n print(f\"{RED} Made By {RESET}\")\n print(f\"{RED} Ehthe Samul Islam Laskar USN:1DS16CS712 {RESET}\")\n print(f\"{RED} B Padma USN:1DS19CS420{RESET}\")\n print(f\"{RED} Nikhil D Kanyal USN:1DS17CS731{RESET}\")\n print(f\"{YELLOW}Type 'help' to see commands{RESET}\")",
"def splash_screen():\n figlet = Figlet(font=\"slant\")\n banner = figlet.renderText(\"TechX API Gateway\")\n print(banner)\n print(\"[+] 2020 TechX API Gateway www.cisco.com\\n\")",
"def welcome_banner():\n print('\\t*' * 10)\n print('\\t\\tWelcome!')\n print('\\tPut your knowledge to the test with this Ultimate Quiz Questions!')\n print('\\t*' * 10)\n print()",
"def print_banner(dog=True):\n banner = \"\"\n if dog:\n banner += \" ____,'`-,\\n\"\n banner += \" _,--' ,/::.;\\n\"\n banner += \" ,-' ,/::,' `---.___ ___,_\\n\"\n banner += \" | ,:';:/ ;'\\\"';\\\"`--./ ,-^.;--.\\n\"\n banner += \" |: ,:';,' ' `. ;` `-.\\n\"\n banner += \" \\\\:.,:::/;/ -:. ` | ` `-.\\n\"\n banner += \" \\\\:::,'//__.; ,; , , :.`-. :. | ; :.\\n\"\n banner += \" \\\\,',';/O)^. :' ; : '__` ` :::`. .:' )\\n\"\n banner += \" |,' |\\\\__,: ; ; '/O)`. :::`; ' ,'\\n\"\n banner += \" |`--'' \\\\__,' , ::::( ,'\\n\"\n banner += \" ` , `--' ,: :::,'\\\\ ,-'\\n\"\n banner += \" | ,; , ,::' ,::: |,'\\n\"\n banner += \" |,: .( ,:::| `\\n\"\n banner += \" ::'_ _ :: ,::/:|\\n\"\n banner += \" ,',' `-' \\\\ `. ,:::/,:|\\n\"\n banner += \" | : _ _ | ' ,::,' :::\\n\"\n banner += \" | \\\\ O`'O ,', , :,' ;::\\n\"\n banner += \" \\\\ `-'`--',:' ,' , ,,' ::\\n\"\n banner += \" ``:.:.__ ',-',' ::'\\n\"\n banner += \" -hrr- `--.__, ,::. ::'\\n\"\n banner += \" |: ::::. ::'\\n\"\n banner += \" |: :::::: ,::'\\n\"\n banner += \"########################################################\\n\"\n banner += \"# ruffer-overflow v0.2 #\\n\"\n banner += \"# don't \\\"bark\\\" up the wrong tree. #\\n\"\n banner += \"#======================================================#\\n\"\n banner += \"# weak-sauce tool for buffer-overflow #\\n\"\n banner += \"# please don't crime with it. #\\n\"\n banner += \"########################################################\\n\"\n print(banner)",
"def banner(self, banner):\n self._banner = banner",
"def banner():\n\n def random_color():\n valid_colors = (\"red\", \"green\", \"yellow\", \"blue\", \"magenta\", \"cyan\")\n return random.choice(valid_colors)\n\n autoRecon = rf\"\"\"\n _____________ ____ ________________\n /___/___ \\ / / | /___/__ \\ Mr.P-Millz _____\n O.G./ / _ \\______/__/ |______|__|_____ * \\_________________/__/ |___\n __/__/ /_\\ \\ | | \\ __\\/ _ \\| | __/ __ \\_/ ___\\/ _ \\| |\n | | ___ \\| | /| | ( |_| ) | | \\ ___/\\ \\__( |_| ) | |\n |___|____/\\__\\____|____/_|__|\\_\\____/|__|____|_ /\\___ |\\___ \\____/|___| /\n gtihub.com/Knowledge-Wisdom-Understanding \\___\\/ \\__\\/ \\__\\_/ v{V} \\___\\/\n\n\"\"\"\n\n def print_art(msg, color):\n colored_art = colored(msg, color=color)\n print(colored_art)\n\n color = random_color()\n print_art(autoRecon, color)",
"def banner(name):\n print \"#\"\n print \"# {0}\".format(name.encode('utf-8'))\n print \"#\"\n return name",
"def my_banner(bannerString):\n print(len(bannerString) * \"!\")\n print(bannerString)\n print(len(bannerString) * \"!\")",
"def banner(self):\n return self._banner",
"def banner():\n return \" » \".join(\n [\n f\"Robot Framework Kernel [{__version__}]\",\n f\"Robot Framework [{robot.__version__}]\",\n f\"ipykernel [{ipykernel.__version__}]\",\n f\"IPython [{IPython.__version__}]\",\n f\"Python [{sys.version}]\",\n ]\n )",
"def print_header(self):\n print()\n print(\"=\"*25)\n print()\n print(\"Have fun in your blackjack round!\")\n print()\n print(\"=\"*25)",
"def print_banner(filename: str, template: str = DEFAULT_BANNER_TEMPLATE) -> None:\n if not os.path.isfile(filename):\n logger.warning(\"Can't find logo banner at %s\", filename)\n return\n\n with open(filename, \"r\") as f:\n banner = f.read()\n\n formatted_banner = template.format(banner)\n print(formatted_banner)",
"def print_banner(title):\n\n title = \" \" + title + \" \"\n\n nequals = ncolumns - len(title)\n nleft = nequals // 2\n\n print((\"=\" * (nleft + nequals %2)) + title + (\"=\" * nleft))",
"def logo():\n print (\"\"\"\\\n _ _\n| |_ ___ ___ ___ _ _ _| |\n| | . | | -_| | | . |\n|_|_|___|_|_|___|_ |___|\n |___|\n \"\"\")\n print ('Author: Peter Sooky <448291@mail.muni.cz>')\n print ('Honeyd-python {0}'.format(honeyd.__version__))",
"def printlogo():\n print(\"\")\n print(\" ;;;;;;;;;;;;;;;;;;; \")\n print(\" ;;;;;;;;;;;;;;;;;;; \")\n print(\" ; ; \")\n print(\" ; bandaid ; \")\n print(\" ; ; \")\n print(\" ; +-----------+ ; \")\n print(\" ; |by JC 2020 | ; \")\n print(\" ; +-----------+ ; \")\n print(\" ; ; \")\n print(\",;;;;; ,;;;;; \")\n print(\";;;;;; ;;;;;; \")\n print(\"`;;;;' `;;;;' \")\n print(\"\")",
"def print_banner_block(text, width=80, mark=\"-\", end=\"#\"):\n num_spaces_total = width - len(text) - 2\n num_spaces_left = num_spaces_total // 2\n num_spaces_right = num_spaces_total - num_spaces_left\n banner_with_spaces = end + \" \" * num_spaces_left\n banner_with_spaces += text\n banner_with_spaces += \" \" * num_spaces_right + end\n border = end + mark * (width - 2) + end\n print(border)\n print(banner_with_spaces)\n print(border)",
"def bbs_show_banner(tn, short = True):\n lines = cmd.lban(tn, short_banner = short)\n for line in lines:\n print(filter_tags(line))",
"def print_the_header():\n print('-------------------')\n print(' Weather APP')\n print('-------------------')\n print()",
"def print_banner(\n cls,\n agent_label,\n inbound_transports,\n outbound_transports,\n public_did,\n admin_server=None,\n banner_length=40,\n border_character=\":\",\n ):\n print()\n with Banner(border=border_character, length=banner_length) as banner:\n # Title\n banner.title(agent_label or \"ACA\")\n # Inbound transports\n banner.subtitle(\"Inbound Transports\")\n internal_in_transports = [\n f\"{transport.scheme}://{transport.host}:{transport.port}\"\n for transport in inbound_transports.values()\n if not transport.is_external\n ]\n if internal_in_transports:\n banner.list(internal_in_transports)\n external_in_transports = [\n f\"{transport.scheme}://{transport.host}:{transport.port}\"\n for transport in inbound_transports.values()\n if transport.is_external\n ]\n if external_in_transports:\n banner.subtitle(\" External Plugin\")\n banner.list(external_in_transports)\n\n # Outbound transports\n banner.subtitle(\"Outbound Transports\")\n internal_schemes = set().union(\n *(\n transport.schemes\n for transport in outbound_transports.values()\n if not transport.is_external\n )\n )\n if internal_schemes:\n banner.list([f\"{scheme}\" for scheme in sorted(internal_schemes)])\n\n external_schemes = set().union(\n *(\n transport.schemes\n for transport in outbound_transports.values()\n if transport.is_external\n )\n )\n if external_schemes:\n banner.subtitle(\" External Plugin\")\n banner.list([f\"{scheme}\" for scheme in sorted(external_schemes)])\n\n # DID info\n if public_did:\n banner.subtitle(\"Public DID Information\")\n banner.list([f\"DID: {public_did}\"])\n\n # Admin server info\n banner.subtitle(\"Administration API\")\n banner.list(\n [f\"http://{admin_server.host}:{admin_server.port}\"]\n if admin_server\n else [\"not enabled\"]\n )\n\n banner.version(__version__)\n\n print()\n print(\"Listening...\")\n print()"
] | [
"0.7396634",
"0.73954254",
"0.727118",
"0.72663784",
"0.7056715",
"0.7042273",
"0.70386666",
"0.700003",
"0.675228",
"0.6678629",
"0.651769",
"0.6499614",
"0.6490508",
"0.6374026",
"0.63351125",
"0.62801325",
"0.62523764",
"0.623986",
"0.61925286",
"0.61004984",
"0.59268594",
"0.5877095",
"0.58561337",
"0.58067316",
"0.5794721",
"0.57774323",
"0.57552075",
"0.57110965",
"0.5668594",
"0.56129366"
] | 0.76449466 | 0 |
Returns a list of all BBS in the db | def get_list_of_bbs(self):
return self.mfp.get_list_of_bbs() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_bt(self):\n return list(self.collection.find({\"sensor_type\": \"bt\"}, {\"_id\": False})) # Return a list",
"def get_blists(self):\n return self.blists[:]",
"def get_bus_list():\n\n\tbuses = db.session.query(Bus.bus_name).all()\n\n \n\treturn buses",
"def list(cls, context, filters=None, limit=3000, marker=1,\n sort_key='id', sort_dir='asc'):\n db_boars = cls.dbapi.get_boar_list(\n context, limit=limit, marker=marker, sort_key=sort_key,\n sort_dir=sort_dir, filters=filters)\n\n #import pdb; pdb.set_trace()\n return [Boar._from_db_object(cls(context), obj) for obj in db_boars]",
"def __sync_bulbs__() -> list:\n\n bulbs = list()\n\n try:\n discovered_bulbs = discover_bulbs(timeout=2)\n except Exception as e:\n raise Exception(str(e))\n\n for bulb in discovered_bulbs:\n ip = bulb['ip']\n port = bulb['port']\n model = bulb['capabilities']['model']\n name = bulb['capabilities']['name']\n name = name if name != '' else ip\n identifier = bulb['capabilities']['id']\n\n found_bulb = Bulb(\n ip=ip,\n port=port,\n model=model\n )\n\n found_bulb.set_name(name)\n properties = found_bulb.get_properties()\n\n bulbs.append({\n 'bulb': found_bulb,\n 'name': name,\n 'model': model,\n 'ip': ip,\n 'metadata':\n {\n 'id': identifier,\n 'ip': ip,\n 'name': name,\n 'model': model,\n 'properties': properties\n }\n })\n\n return bulbs",
"def get_all(self):\n cursor = self._dbcon.cursor()\n cursor.execute(u\"select rowid,* from books\")\n result = cursor.fetchall()\n cursor.close()\n return [self._book_from_query_result(x) for x in result]",
"def list_dbs(self):\n return self.get('_all_dbs').json()",
"def get_all_bank_names() -> List[str]:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from bank\"\n cursor.execute(query)\n data = cursor.fetchall()\n r_list = [x[0] for x in data]\n db.disconnect()\n return r_list",
"def get_all_borrowed_books():\n return BorrowBook.query.all()",
"def get_blocks(self):\n cmd = \"\"\" SELECT * FROM %s; \"\"\" %(TABLE_BLOCKCHAIN)\n\n self.__dbcursor.execute(cmd)\n return self.__dbcursor.fetchall()",
"def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list",
"def demo_get_all_books(self):\n results = []\n self.cursor.execute(\"\"\"SELECT ISBN FROM book\"\"\")\n for book in self.cursor.fetchall():\n results.append(book[0])\n return results",
"def do_bay_list(cs, args):\n bays = cs.bays.list(marker=args.marker, limit=args.limit,\n sort_key=args.sort_key,\n sort_dir=args.sort_dir)\n columns = ['uuid', 'name', 'node_count', 'master_count', 'status']\n columns += utils._get_list_table_columns_and_formatters(\n args.fields, bays,\n exclude_fields=(c.lower() for c in columns))[0]\n utils.print_list(bays, columns,\n {'versions': magnum_utils.print_list_field('versions')},\n sortby_index=None)",
"def get_biases(self):\n return []",
"def vbd_list(name=None, call=None):\n if call == \"function\":\n raise SaltCloudSystemExit(\n \"This function must be called with -a, --action argument.\"\n )\n if name is None:\n return \"A name kwarg is rquired\"\n ret = {}\n data = {}\n session = _get_session()\n vms = session.xenapi.VM.get_by_name_label(name)\n if len(vms) == 1:\n vm = vms[0]\n vbds = session.xenapi.VM.get_VBDs(vm)\n if vbds is not None:\n x = 0\n for vbd in vbds:\n vbd_record = session.xenapi.VBD.get_record(vbd)\n data[\"vbd-{}\".format(x)] = vbd_record\n x += 1\n ret = data\n return ret",
"def get_all(self):\n return self.db",
"def get_list() -> List[BankDetails]:\n from paynlsdk.client.transaction import Transaction\n return Transaction.get_banks().banks",
"def get_all(user_id):\n return Bucketlist.query.filter_by(created_by=user_id)",
"def show_all_brain_dumps(user_id):\n\n # grab user in the session\n user_id = session.get(\"user_id\")\n\n # grabs all the brain dumps from the user and order them by date created\n brain_dumps = (\n User_Brain_Dump.query.filter_by(user_id=user_id)\n .order_by(desc(\"date_created\"))\n .all()\n )\n\n page, per_page, offset = get_page_args(\n page_parameter=\"page\", per_page_parameter=\"per_page\"\n )\n\n per_page = 5\n\n offset = (page - 1) * per_page\n total = len(brain_dumps)\n\n pagination_brain_dumps = brain_dumps[offset : offset + per_page]\n pagination = Pagination(\n page=page, per_page=per_page, total=total, css_framework=\"bootstrap4\"\n )\n\n return render_template(\n \"all-brain-dumps.html\",\n brain_dumps=pagination_brain_dumps,\n user_id=user_id,\n per_page=per_page,\n pagination=pagination,\n )",
"def list_databases(self) -> List[Dict]:\n self._check_connection(check_db=False)\n all_data = self.get_databases()\n all_dbs = []\n for data in all_data:\n all_dbs.append(data[\"system:resource_name\"][\"@value\"])\n return all_dbs",
"def get_vendor_bills(self, count: int = 10) -> list:\n return list(\n itertools.islice(self.client.vendor_bills.get_all_generator(), count)\n )",
"def get_bh_obj(self, dbName):\n bh_xml = self.get_batchHistorical_XML(dbName)\n return self.get_batchHistorical_obj(bh_xml)",
"def produce_query_batches(self):\n self.__generate_queries()\n return self.__bobs",
"def get_bulbs(ip=None, name=None, model=None, metadata=False) -> list:\n bulbs = list()\n\n param = 'ip'\n value = ip\n return_all = False\n\n if name:\n param = 'name'\n value = name\n elif model:\n param = 'model'\n value = model\n elif not ip:\n return_all = True\n elif ip:\n ipaddress.ip_address(str(ip))\n\n for bulb in __sync_bulbs__():\n if bulb[param] == value or return_all:\n bulbs.append(bulb['metadata'] if metadata else bulb['bulb'])\n return bulbs",
"def list_buckets():\n for bucket in BUCKET_MANAGER.all_buckets():\n print(bucket)",
"def get_biases(self):\n if self.b is None:\n return []\n else:\n return [self.b]",
"def get_biases(self):\n if self.b is None:\n return []\n else:\n return [self.b]",
"def get_biases(self):\n return list(self.b.values())",
"def get_biases(self):\n return list(self.b.values())",
"def get_biases(self):\n return list(self.b.values())"
] | [
"0.69727236",
"0.67828137",
"0.6740348",
"0.65534073",
"0.6521765",
"0.63936526",
"0.6312505",
"0.6268653",
"0.62648267",
"0.62623644",
"0.6186435",
"0.61633617",
"0.61626595",
"0.6158413",
"0.6122159",
"0.611637",
"0.610624",
"0.5955863",
"0.5916003",
"0.58861715",
"0.5885258",
"0.5875262",
"0.5860586",
"0.584888",
"0.5844806",
"0.5834048",
"0.5834048",
"0.58271986",
"0.58271986",
"0.58271986"
] | 0.70204824 | 0 |
Gets the scaled topo file for a given supercell id. | def get_scaledtopo(self,id):
lines = self.mfp.get_scaledtopo(id)
return lines | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def id_to_base_id(self, id):\n if self.xy_tiling is None and self.pc_tiling is None:\n return id\n return self.get_tile_from_path(id)[1]",
"def _get_feat_geo_from_file(self, id):\n path_feature, path_mask, path_geo = self._get_name_save(id)\n feature_filt_padded = torch.load(path_feature, map_location=torch.device('cpu')).long()\n mask = torch.load(path_mask, map_location=torch.device('cpu'))\n geo_filt_padded = torch.load(path_geo, map_location=torch.device('cpu'))\n return feature_filt_padded, mask, geo_filt_padded",
"def sersic_2d_image(data_dir):\n path = \"sersic_2d_image.fits.gz\"\n sersic_2d_path = os.path.join(data_dir, path)\n return fits.getdata(sersic_2d_path)",
"def get_sf (pdb_id):\n pdb_ftp_link = \"https://ftp.rcsb.org/pub/pdb/data/structures/all/structure_factors/\"\n url = pdb_ftp_link + \"r{}sf.ent.gz\".format(pdb_id)\n r = requests.get(url)\n with open(sf_path + \"r{}sf.cif.gz\".format(pdb_id), 'wb') as f:\n f.write(r.content)\n #unzips the downloaded file\n os.system(\"gunzip \"+sf_path + \"r{}sf.cif.gz\".format(pdb_id))\n return \"r{}sf.cif\".format(pdb_id)",
"def getbyid(self, id):\n\n return esd.retrieve(id)",
"def get_imc_topo(topo_file):\n topo_graph = nx.Graph()\n with open(topo_file, 'r') as f:\n for line in f.readlines():\n if (len(line) > 10) and (line[0] != '#'):\n split_data = line.split()\n source = split_data[0]\n dest = split_data[2]\n #capacity = 1000 # We are fixing this to one.\n capacity = get_imc_capacity(split_data[1], split_data[3])\n if not topo_graph.has_edge(source, dest):\n topo_graph.add_edge(source, dest, capacity = capacity)\n # Checks graph for any componnets and returns the largest one.\n topo_graph = validate_graph(topo_graph)\n f.close()\n return topo_graph",
"def cell_for_id(self, id):\n\t\tcell_id = (id & self.id2cell_mask) | u0xFFFFFFFF\n\t\tassert np.all(self.is_cell_id(cell_id))\n\n\t\t# TODO: Debugging (remove when happy)\n\t\tx, y, t, _ = self._xyti_from_id(id)\n\t\tcell_id2 = self._cell_id_for_xyt(x, y, t)\n\t\tassert np.all(cell_id2 == cell_id), 'cell_id2=%s cell_id=%s %s x=%s y=%s t=%s' % (cell_id2, cell_id, bin(cell_id), x, y, t)\n\n\t\treturn cell_id",
"def get_topogram(self, _id):\n return self.make_request(\"GET\", \"topograms/\"+_id, {})",
"def _get_disk_by_id(worker):\n cmd = (\n f\"oc debug nodes/{worker} --to-namespace={config.ENV_DATA['cluster_namespace']} \"\n f\"-- chroot /host ls -la /dev/disk/by-id/\"\n )\n return run_cmd(cmd)",
"def get_size_by_id(self, size_id):\n sizes = self._driver.list_sizes()\n size = [i for i in sizes if i.id == size_id][0]\n return size",
"def map_id_to_device(dev_map, osd_id):\n for elem in dev_map:\n if elem['id'] == osd_id:\n return elem['path']",
"def get_geometry(id):\n geom = read_kml()\n result = geom[\"geometry\"][id]\n # print(f\"get_geometry(id={id.__repr__()}) --> {result}\")\n # result.plot()\n return result",
"def get_path(self, path_id):\n\t\tpass",
"def _get_ss_proposal(self, img_id):\n\n if not os.path.isdir(os.path.join(self.root_dir, 'SSProposals')):\n print ('First time run. Refomatting selective search files ...')\n self._reformat_ss_data()\n\n cache_file = os.path.join(self.root_dir, 'SSProposals',\n img_id + '.pkl')\n\n with open(cache_file, 'rb') as fid:\n ss_proposals = cPickle.load(fid)\n return torch.from_numpy(ss_proposals['boxes'].astype(int)).float()",
"def get(cls, id):\n response = get_by_endpoint(\"computed_files/\" + str(id)).json()\n return ComputedFile(**response)",
"def get_from_gridfs(d, f):\n fs = gridfs.GridFS(d)\n b = fs.get(f).read()\n return b",
"def getDim(scale,supercell):\n \n # Check for standard scaling\n motiif_dict = {1:'molecular',supercell:'chains',\\\n supercell**2:'layered', supercell**3:'conventional'}\n if scale in motiif_dict:\n return(motiif_dict[scale])\n \n # If the structure is some intermediate, determine\n # which intermediate\n \n else:\n if scale < 1:\n motiif = 'shrunk molecular'\n elif scale < supercell:\n motiif = \"mol-chain\"\n elif scale < supercell**2:\n motiif = \"chain-2D\"\n elif scale < supercell**3:\n motiif = \"2D-conv\"\n else:\n motiif = 'Network size increased'\n return(motiif)",
"def get_sgd(self, id, name):\n # check if id exists in group definition\n if id in self.mstats.keys() and 'df' in self.mstats[id].keys():\n # print \"id %s in mstats\" % id\n type = 'group' if id.endswith('/') else 'dataset'\n sgd = {'id': id, 'type': type, 'ns':self.sdef['ns'], 'df': self.mstats[id]['df'],}\n # print \"found definition for %s in mstats, mstats=\" % id\n # pp.pprint(self.mstats)\n return sgd\n else:\n # see if parent group is specified in locations; if so, check for id in \n # locations list of members of parent group. Example for nwb format is are\n # \"UnitTimes/\" inside <module>/. <module> is parent group\n pid = self.sdef['id'] # parent id, e.g. \"<module>\"\n ns = self.sdef['ns']\n if pid in self.file.ddef[ns]['locations']:\n if id in self.file.ddef[ns]['locations'][pid]:\n type = 'group' if id.endswith('/') else 'dataset'\n # add id to mstats so can register creation of group\n self.mstats[id] = {'ns':ns, 'created': [], 'qty': '+', \n 'type': type} # todo: jeff, need to check df\n sgd = self.file.get_sdef(id, ns, \"referenced in make_subgroup\")\n # print \"id %s in %s location ns %s structures\" % (id, pid, ns)\n # example output: id UnitTimes/ in <module>/ location ns core structures\n # traceback.print_stack()\n return sgd\n else:\n print \"found parent %s in locations, but %s not inside\" % (pid, id)\n print \"locations contains:\"\n pp.pprint(self.file.ddef[ns]['locations'][pid])\n else:\n print \"did not find parent %s in locations for namespace %s\" % (pid, ns)\n print \"** Error, attempting to create '%s' (name='%s') inside group:\" % (id, name)\n print self.full_path\n print \"But '%s' is not a member of the structure for the group\" % id\n print \"Valid options are:\", self.mstats.keys()\n # print \"Extra information (for debugging): Unable to find definition for node %s\" % id\n # print \"mstats=\"\n # pp.pprint(self.mstats)\n traceback.print_stack()\n sys.exit(1)",
"def from_id(self, id_):\n return self._id_to_loadout.get(id_)",
"def get_climatology(gt_id, mask_df=None, shift=None):\n # Load global climatology if US climatology requested\n gt_id = gt_id.replace(\"us_\", \"global_\")\n climatology_file = os.path.join(\"data\", \"dataframes\",\n \"official_climatology-\"+gt_id+\"-1981-2010.h5\")\n return load_measurement(climatology_file, mask_df, shift)",
"def from_id(self, id_):\n return self._name_to_operator.get(id_)",
"def stich_from_id(id, title):\n response = requests.get('https://vangoghmuseum-assetserver.appspot.com/tiles?id=%s' % id)\n data = json.loads(response.text)\n stich(data, title)",
"def get(self, id):\n file = (\n self.drive.files()\n .get(\n fileId=id,\n fields=\"id, name\",\n supportsAllDrives=self.shared_drive[0],\n )\n .execute()\n )\n return file",
"def _get_depth_map_scale_subfolder(self):\n if self.im_scale <= 0.25:\n if self.im_scale <= 0.125:\n return \"Depth/0.125/\"\n else:\n return \"Depth/0.25/\"\n else: \n return \"Depth/\"",
"def get_by_id(self, id: str) -> \"Dataset\":\n raise NotImplementedError",
"def get_specific_tile(idx, tiles_gdf):\n tile_poly = tiles_gdf.iloc[idx]['geometry']\n # print(tile_poly.bounds)\n return tile_poly",
"def get_tile(self, tile, as_png=False, overwrite=True):\n zoom, row, col = tile\n output_path = self.config[\"output_name\"]\n zoomdir = os.path.join(output_path, str(zoom))\n rowdir = os.path.join(zoomdir, str(row))\n image_path = os.path.join(rowdir, str(col)+\".png\")\n if os.path.isfile(image_path):\n return send_file(image_path, mimetype='image/png')\n else:\n try:\n self.save_tile(tile)\n except:\n print \"tile not available\", tile\n size = self.tile_pyramid.tile_size\n empty_image = Image.new('RGBA', (size, size))\n return empty_image.tobytes()\n return send_file(image_path, mimetype='image/png')",
"def get_tile(cls, tile_id):\n\n return Tile.tile_listing.get(tile_id, None)",
"def get_by_id(self, id):\n return self._mzml_parser.get_by_id(id)",
"def get_file(self, sys_id):\n url = \"{}/file\".format(self._target(sys_id))\n r = self._client.session.get(url, stream=True)\n return r"
] | [
"0.5464149",
"0.50709033",
"0.50375205",
"0.5031053",
"0.48634726",
"0.48348683",
"0.4817579",
"0.4804395",
"0.47939923",
"0.47609693",
"0.46538952",
"0.4650901",
"0.46247876",
"0.46213096",
"0.45644408",
"0.45580676",
"0.4558013",
"0.4542269",
"0.45237747",
"0.45166185",
"0.45030698",
"0.4500567",
"0.44944832",
"0.44865754",
"0.4479755",
"0.44778517",
"0.44692793",
"0.4466645",
"0.44433647",
"0.44399855"
] | 0.7465723 | 0 |
Gets the orients file for a given supercell id. | def get_orients(self,id):
lines = self.mfp.get_orients(id)
return lines | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_orientations(self):\n for atom in self.invarioms:\n atom.get_orientation()",
"def orient(self):\n self._read(False)\n return self._readings.orient",
"def get_orientations(self, int32 dim, codim=None):\n if codim is not None:\n dim = self.tdim - codim\n\n if dim == 1:\n return self.edge_oris\n\n elif dim == 2:\n return self.face_oris\n\n else:\n raise ValueError('only edges or faces have orientations! (%d)'\n % dim)",
"def get(self, id):\n file = (\n self.drive.files()\n .get(\n fileId=id,\n fields=\"id, name\",\n supportsAllDrives=self.shared_drive[0],\n )\n .execute()\n )\n return file",
"def __data_file_for_row_id(self, row_id):\n file_row_id = int(row_id) % int(self.rows_per_page)\n if file_row_id == 0:\n second_number = (int(row_id) // int(self.rows_per_page)) * int(self.rows_per_page)\n first_number = second_number - int(self.rows_per_page) + 1\n else:\n first_number = (int(row_id) // int(self.rows_per_page)) * int(self.rows_per_page) + 1\n second_number = first_number + int(self.rows_per_page) - 1\n path = self.path + '/data' + str(first_number) + '_' + str(second_number) + '.dat'\n return path",
"def orient(self):\n return self.__ph.get('orient', PH_ORIENT_HORZ)",
"def get_mof_structure_by_id(self,strucid, mol = False):\n lines,name = self.mfp.get_mof_structure_by_id(strucid)\n return lines",
"def get_path(self, path_id):\n\t\tpass",
"def get_orientation(self):\r\n return self.__orientation",
"def getsameIDList(id, file):\n glineList = []\n newread = []\n \n for line in open(file):\n itemList = line[:-1].split('\\t')\n line_id = getsubString(itemList[0],'|')\n \n if id == line_id:\n glineList.append(line)\n else:\n newread.append(line)\n return glineList",
"def id_to_index(self, id):\n raise NotImplementedError",
"def _get_file_by_id(id):\n query = \"\"\"SELECT * FROM files WHERE id = (:id) LIMIT 1\"\"\"\n param_obj = {'id': id}\n return _execute(query, param_obj)",
"def escribir_indir(self, FILESYS, id,name_file=\"Xx.xXx.xXx.xXx.\",\n size_file=\"\",inicluster=\"\",cdate=\"\",mdate=\"\",no_use=\"\"):\n byte = 512\n tamanno_indir = 64\n id = int(id)\n try:\n FILESYS[byte+(tamanno_indir*id):byte+(tamanno_indir*id)+15] =\\\n ((\" \"*(15-len(str(name_file))))+str(name_file)).encode('ascii')\n except:\n print(\"Nombre no valido\")\n return False\n FILESYS[byte+(tamanno_indir*id)+16:byte+(tamanno_indir*id)+24] =\\\n (\"0\"*(8-len(str(size_file)))+str(size_file)).encode('ascii')\n FILESYS[byte+(tamanno_indir*id)+25:byte+(tamanno_indir*id)+30] =\\\n (\"0\"*(5-len(str(inicluster)))+str(inicluster)).encode('ascii')\n FILESYS[byte+(tamanno_indir*id)+31:byte+(tamanno_indir*id)+45] =\\\n (\"0\"*(14 - len(str(cdate)))+str(cdate)).encode('ascii')\n FILESYS[byte+(tamanno_indir*id)+46:byte+(tamanno_indir*id)+60] =\\\n (\"0\"*(14 - len(str(mdate)))+str(mdate)).encode('ascii')\n FILESYS[byte+(tamanno_indir*id)+61:byte+(tamanno_indir*id)+64] =\\\n (\"\\x00\"*(3 - len(str(no_use)))+str(no_use)).encode('ascii')\n return True",
"def get_orientation(self):\n return self._orientation",
"def _levelFromIfd(self, ifd, baseifd):\n sizeX = ifd['tags'][tifftools.Tag.ImageWidth.value]['data'][0]\n sizeY = ifd['tags'][tifftools.Tag.ImageLength.value]['data'][0]\n tileWidth = baseifd['tags'][tifftools.Tag.TileWidth.value]['data'][0]\n tileHeight = baseifd['tags'][tifftools.Tag.TileLength.value]['data'][0]\n for tag in {\n tifftools.Tag.SamplesPerPixel.value,\n tifftools.Tag.BitsPerSample.value,\n tifftools.Tag.PlanarConfig.value,\n tifftools.Tag.Photometric.value,\n tifftools.Tag.Orientation.value,\n tifftools.Tag.Compression.value,\n tifftools.Tag.TileWidth.value,\n tifftools.Tag.TileLength.value,\n }:\n if ((tag in ifd['tags'] and tag not in baseifd['tags']) or\n (tag not in ifd['tags'] and tag in baseifd['tags']) or\n (tag in ifd['tags'] and\n ifd['tags'][tag]['data'] != baseifd['tags'][tag]['data'])):\n msg = 'IFD does not match first IFD.'\n raise TileSourceError(msg)\n sizes = [(self.sizeX, self.sizeY)]\n for level in range(self.levels - 1, -1, -1):\n if (sizeX, sizeY) in sizes:\n return level\n altsizes = []\n for w, h in sizes:\n w2f = int(math.floor(w / 2))\n h2f = int(math.floor(h / 2))\n w2c = int(math.ceil(w / 2))\n h2c = int(math.ceil(h / 2))\n w2t = int(math.floor((w / 2 + tileWidth - 1) / tileWidth)) * tileWidth\n h2t = int(math.floor((h / 2 + tileHeight - 1) / tileHeight)) * tileHeight\n for w2, h2 in [(w2f, h2f), (w2f, h2c), (w2c, h2f), (w2c, h2c), (w2t, h2t)]:\n if (w2, h2) not in altsizes:\n altsizes.append((w2, h2))\n sizes = altsizes\n msg = 'IFD size is not a power of two smaller than first IFD.'\n raise TileSourceError(msg)",
"def get_deposition(self, id: uplink.Path):\n pass",
"def get_org_spec_dir(self, org_id):\n return self._get_org_base_dir(org_id)",
"def _get_organisms_file_path(self, gene_name, gene_id):\n return os.path.join(os.getcwd(), \"src\", \"data\", \"organisms\", \"{}_{}.txt\".format(gene_name, gene_id))",
"def read(self):\n if self.getiddname() is None:\n errortxt = (\n \"IDD file needed to read the idf file. \"\n \"Set it using IDF.setiddname(iddfile)\"\n )\n raise IDDNotSetError(errortxt)\n readout = idfreader1(\n self.idfname, self.iddname, self, commdct=self.idd_info, block=self.block\n )\n (self.idfobjects, block, self.model, idd_info, idd_index, idd_version) = readout\n self.setidd(idd_info, idd_index, block, idd_version)",
"def getRoiInfo(self, fh):\n fn = fh.name()\n rf = open(fn[:-4]+'.roi', 'r')\n rois = np.loadtxt(rf)\n return rois",
"def orientation(self):\n agents = self.board[self.agent_locs_idx]\n out = (agents & CellTypes.orientation_mask) >> CellTypes.orientation_bit\n return out.astype(np.int64)",
"def read_hierarchy(self, fid):\r\n\r\n lin = self.read_line(fid)\r\n \r\n while lin != 'end':\r\n parts = lin.split()\r\n if lin != 'begin':\r\n ind = self.get_index_by_name(parts[0])\r\n for i in range(1, len(parts)):\r\n self.vertices[ind].children.append(self.get_index_by_name(parts[i]))\r\n lin = self.read_line(fid)\r\n lin = self.read_line(fid)\r\n return lin",
"def __row_id_in_file(self, row_id):\n #if our table doesn't have any rows yet\n if row_id == 0:\n return 0\n else:\n file_row_id = int(row_id) % int(self.rows_per_page)\n if file_row_id == 0:\n file_row_id = int(file_row_id) + int(self.rows_per_page)\n return file_row_id",
"def get_read_orientation_outward(self, ctx, params):\n # ctx is the context object\n # return variables are: returnVal\n #BEGIN get_read_orientation_outward\n\n if 'workspace_name' not in params:\n raise ValueError('Parameter workspace_name is not set in input arguments')\n workspace_name = params['workspace_name']\n if 'id' not in params:\n raise ValueError('Parameter id is not set in input arguments')\n objid = params['id']\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n try:\n\n objref = workspace_name + '/' + str(objid)\n\n # Note that results from the workspace are returned in a list\n returnVal = wsClient.get_objects([{'ref': objref}])[0]\n\n if returnVal is not None:\n if returnVal['data']['single_genome'] is not None:\n returnVal = returnVal['data']['single_genome']\n\n print \"is_single_genome issingle \" + str(returnVal)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n orig_error = ''.join(' ' + line for line in lines)\n raise ValueError('Error from workspace:\\n' + orig_error)\n\n #END get_read_orientation_outward\n\n # At some point might do deeper type checking...\n if not isinstance(returnVal, int):\n raise ValueError('Method get_read_orientation_outward return value ' +\n 'returnVal is not type int as required.')\n # return the results\n return [returnVal]",
"def galaxy2_orbital_orientation(self):\n return self._galaxy2_orbital_orientation",
"def getID(filePath):\r\n\r\n fileID = rmn.fstopenall(filePath,rmn.FST_RO)\r\n return fileID",
"def get_entrance_junction(self, id):\n return self.sections[id][0]",
"def calculateOrientation(self):\n orientation = [\n [Orientation.LANDSCAPE, Orientation.PORTRAIT],\n [Orientation.CW_LANDSCAPE, Orientation.CW_PORTRAIT],\n [Orientation.CCW_LANDSCAPE, Orientation.CCW_PORTRAIT],\n [Orientation.FLIPPED_LANDSCAPE, Orientation.FLIPPED_PORTRAIT],\n ]\n exif = self.getExif()\n if exif and exif['Orientation'] == 3: #180\n rotation = 3\n elif exif and exif['Orientation'] == 6: #90 CCW\n rotation = 2\n elif exif and exif['Orientation'] == 8: #90 CW\n rotation = 1\n else:\n rotation = 0\n\n if self.isLandscape():\n return orientation[rotation][0]\n else:\n return orientation[rotation][1]",
"def interacs_maker(path, filenames, users_ids, output):\n\tids_dict = {}\n\twith open(users_ids, 'r', encoding='utf-8') as f:\n\t\tfor line in f:\n\t\t\tline = line.strip().split(',',1)\n\t\t\tid, screen_name = line[0], line[1]\n\t\t\tids_dict[screen_name] = id\n\n\t# with open(users_ids, 'r', encoding='utf-8') as f:\n\t# \tids = f.read().split(\"\\n\")\n\t# enum_ids = enumerate(ids)\n\n\tpaths = complete_paths(path, filenames)\n\t\n\tinteractions = []\n\n\tfor i in range(0, len(paths)):\n\t\tscreen_name = paths[i][36:-4] # 乁〳 ❛ д ❛ 〵ㄏ\n\t\tid = ids_dict[screen_name]\n\t\t# index = [i for i, s in enum_ids if screen_name in s][0]\n\t\t# id = re.search(r'\\d+', ids[index]).group()\n\t\t\n\t\twith open(paths[i], 'r', encoding='utf-8') as f:\n\t\t\tprint(paths[i])\n\t\t\tfor line in f:\n\t\t\t\tprint(line)\n\n\t\t\t\tif line=='\\n' or line=='':\n\t\t\t\t\tcontinue\n\n\t\t\t\tline = line.strip().split(',') # Ya no hay temor: números enteros. Coma sólo separa rating de item_id\n\t\t\t\tinteractions.append( [id, line[1], line[0]] ) # [user_id, item_id, rating]\n\n\twith open(output, 'w+') as f:\n\t\tfor triple in interactions:\n\t\t\tf.write(\"{0},{1},{2}\\n\".format( triple[0], triple[1], triple[2] ) )\n\n\treturn 0",
"def get_files(self, sid):\n try:\n return self.datas.get(sid)\n except Exception as ex:\n raise ex"
] | [
"0.5392666",
"0.5238473",
"0.50075924",
"0.47091204",
"0.46686122",
"0.46168557",
"0.4579395",
"0.45001397",
"0.44533232",
"0.44359028",
"0.44122255",
"0.44016522",
"0.439454",
"0.4392099",
"0.43870813",
"0.43759003",
"0.4370313",
"0.43686095",
"0.43557945",
"0.43431535",
"0.43205103",
"0.43056014",
"0.42863637",
"0.42746025",
"0.4250367",
"0.4248899",
"0.42487305",
"0.4245979",
"0.42392868",
"0.42298105"
] | 0.65023 | 0 |
Retrieve the list of films in which a character appears | def getFilms(character):
ret = []
for film in character.get('films'):
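        # each film entry is a URL ending in /<id>/; the numeric id keys the (externally defined) title cache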
number = int(film.rstrip('/').rpartition('/')[2])
if number not in cache:
response = requests.get(film)
response = response.json()
title = response.get('title')
cache[number] = title
ret.append(cache.get(number))
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def castFilmography (movies, minAppearances):\n actors = {}\n for (k,v) in movies.items():\n for a in v[2:7]:\n actors[a] = actors.get(a, []) + [k]\n return sorted([ [k] + v for (k,v) in actors.items() if len(v) >= minAppearances ])",
"def get_cards(self):\n return [Flashcard.from_word(word) for word in self.get_words()]",
"def search(self):\n datas = self.cleaned_data\n films = Film.objects\n if datas['title']:\n films = films.filter(Q(title_fr__icontains=datas['title']) | Q(title_en__icontains=datas['title']))\n if datas['character']:\n films = films.filter(Q(actors__firstname__icontains=datas['character']) | Q(actors__lastname__icontains=datas['character']))\n if datas['country']:\n films = films.filter(countries__icontains=datas['country'])\n if datas['start_date']:\n films = films.filter(release_date__gte=datas['start_date'])\n if datas['end_date']:\n films = films.filter(release_date__lte=datas['end_date'])\n if datas['play']:\n films = films.filter(play_references__play=datas['play'])\n if datas['adaptation']:\n films = films.filter(play_references__type__name=datas['adaptation'])\n if datas['contributor']:\n films = films.filter(contributor=datas['contributor'])\n return films",
"def furanose_names(self):\n output = set()\n for item in self.monomers():\n if item in self.furanose_fac:\n output.add(self.furanose_fac[item][\"name\"])\n return list(output)",
"def _get_genres(self):\n separated = self.movies['genre'].apply(self.separate_genre)\n return {g: True for x in separated for g in x}.keys()",
"def search_character(realm_list, PATH):\r\n dict_char = {}\r\n for realm in realm_list:\r\n char_list = os.listdir(PATH + realm)\r\n dict_char[realm] = char_list\r\n return dict_char, realm_list",
"def filter(self, ffun):\n # BEGIN\n lst = []\n for item in WordSet(self.text).words():\n # if len(item) == len(ffun):\n # lst.append(item)\n if ffun(item) == True:\n lst.append(item)\n return lst\n\n # END",
"def get_documents():\n documents = []\n for category in movie_reviews.categories():\n for fileid in movie_reviews.fileids(category):\n documents.append((list(movie_reviews.words(fileid)), category))\n \n return documents",
"def get_imdb_list():\n list_file = 'imdb.txt'\n name_column = 26\n f = open(list_file, 'r')\n film_list = []\n pos = 0\n\n for line in f:\n pos += 1\n words = line.split()\n name = line[name_column:-1]\n # could be problematic is there are brackets in the film name\n year = name[name.find('(') + 1:name.find(')')]\n name = name.replace('(' + year + ')', '')\n film = {\n 'pos': pos,\n 'score': Decimal(words[2]),\n 'name': name.strip(),\n 'year': year\n }\n film_list.append(film)\n f.close()\n return film_list",
"def get_documents(self, value, key='name'):\n documents = []\n for doc in value:\n if doc.endswith('.json'):\n key = 'filename'\n documents.append([x for x in self.vocab if x[key] == doc])\n return documents",
"def get_bfi_list():\n list_file = 'bfi_sight_and_sound_2012.txt'\n f = open(list_file, 'r')\n film_list = []\n\n for line in f:\n words = line.split(' ')\n #NOTE: pos is not the position in the pyton list but in the original\n # list so is not always an integer due to joint places\n film = {'pos': words[0], 'name': words[1][:-1]}\n film_list.append(film)\n f.close()\n return film_list",
"def get_filenames(self):\n return [doc['filename'] for doc in self.vocab]",
"def song_by_word(ans):\r\n songs_list = \"\"\r\n ans = ans.lower()\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n song = str(song)\r\n if ans in song.lower():\r\n songs_list += song + \", \"\r\n return songs_list[:-2]",
"def getlistofpossibletitles(fileitem,shows):\n title = []\n title.append(fileitem)\n lookfor = fileitem.replace(\".\",\" \")\n title.append(lookfor)\n lookfor = fileitem.replace('-',\" \")\n title.append(lookfor)\n return title",
"def allPossibleWords(Rack):\n def checkWord(word):\n return stringInRack(word,Rack)\n return filter(checkWord, Dictionary)",
"def get_cites_species():\n mongo_db = mongo_client_db()\n cursor = mongo_db[CITES_COLLECTION].find({'full_name': {'$ne': None}}, {'full_name':1})\n return [r['full_name'].encode('utf8') for r in cursor]",
"def character_statistics(file_name):\n from operator import itemgetter\n import collections\n cnt = collections.Counter()\n\n try:\n fsock = open(file_name,'r')\n except IOError:\n print (\"The file does not exist, exiting gracefully\")\n\n for line in fsock:\n for c in line.rstrip().lower():\n if c.isalpha():\n cnt[c] += 1\n\n lessAbundant = cnt.most_common()[len(cnt)-1][1]\n #print(type(cnt.most_common()[len(cnt)-1]))\n #print(lessAbundant)\n #print (cnt.most_common()[-4:len(cnt)])\n #print (sorted(cnt.items(), key=itemgetter(1))[0])\n #print (cnt.most_common())\n\n # list comprehension\n #lessCommon = sorted([k for (k,v) in cnt.most_common() if v == lessAbundant])[0]\n # tuple unpacking, filter and map\n lessCommon = sorted(list(filter( lambda t: t[1] == lessAbundant, cnt.most_common())))[0][0]\n #lessCommon = map( lambda (keyLetter,_): keyLetter, filter( lambda (_,freqVal): freqVal == lessAbundant, cnt.most_common()) )\n #print(lessCommon)\n\n return (cnt.most_common()[0][0], lessCommon)",
"def get_intent_filers(apk):\n # FIXME : not sure this fully reproduce Koodous filters\n res = []\n filters = apk.xml['AndroidManifest.xml'].findall(\".//intent-filter\")\n for f in filters:\n for ff in f.findall('.//action'):\n filt = ff.get('{http://schemas.android.com/apk/res/android}name')\n if filt:\n res.append(filt)\n return res",
"def list_favor(self):\n if \"all\" in self.switches:\n favors = Reputation.objects.exclude(favor=0).order_by(\"-date_gossip_set\")\n self.msg(\"Characters with favor: %s\" % \", \".join(str(ob) for ob in favors))\n return\n org = self.get_organization(check_perm=False)\n favors = org.reputations.filter(Q(favor__gt=0) | Q(favor__lt=0)).order_by(\n \"-favor\"\n )\n msg = \"{wThose Favored/Disfavored by %s{n\\n\" % org\n msg += \"\\n\\n\".join(\n \"{c%s{w (%s):{n %s\" % (ob.player, ob.favor, ob.npc_gossip) for ob in favors\n )\n self.msg(msg)",
"def known(words):\n return [w for w in words if w in tokenizer.vocab] #change vocab file?",
"def specificWordList(catsString):\n cats = catsStringToArray(catsString)\n wordList = []\n for i in cats:\n for word in Word.objects.all().filter(category=i):\n wordList.append(word)\n return wordList",
"def get_oscars_best_picture_list():\n list_file = 'oscar_best_picture_list.txt'\n f = open(list_file, 'r')\n film_list = []\n\n for line in f:\n words = line.split('-')\n film = {\n 'year': words[0][:-1],\n 'name': words[1][2:-2]\n }\n film_list.append(film)\n f.close()\n # Reverse as we want newest first not last\n film_list.reverse()\n return film_list",
"def find_genre_playlists(data):\n playlists = []\n\n if data['genre']:\n playlists += data['genre']\n\n if data['comments']:\n playlists += data['comments']\n\n matches = re.findall('\\(\\s*(cover|live|unplugged|acoustic|remix|instrumental)', data['title'].lower())\n if matches:\n if 'cover' in matches:\n matches.remove('cover')\n matches += ['covers']\n\n if 'acoustic' in matches:\n matches.remove('acoustic')\n matches += ['unplugged']\n\n if 'remix' in matches:\n matches.remove('remix')\n matches += ['remix']\n\n if 'instrumental' in matches:\n matches.remove('instrumental')\n matches += ['instrumental']\n\n playlists += matches\n\n return set([x for x in playlists if x != 'none'])",
"def recognize_pic(path):\n results = recognize(path, access_token, cookie, fb_dtsg)\n\n names = [str(result['name']) for result in results]\n print ('%s contains %s' % (path, names))\n\n return {\n \"filename\": path,\n \"friends\": names\n }",
"def frequent_words(text, k):\n\n frequent_patterns = []\n freq_map = frequency_table(text, k)\n max_val = max_map(freq_map)\n for key in freq_map.keys():\n if freq_map[key] == max_val:\n frequent_patterns.append(key)\n return frequent_patterns",
"def all_facenames ( ):\n global facenames\n \n if facenames is None:\n facenames = FontEnumerator().facenames()\n facenames.sort()\n return facenames",
"def search(self, filtr):\n return [note for note in self.notes if note.match(filtr)]",
"def api_read_foundations(self):\n return [str(found.get_topmost_card()) for found in self.board.foundations]",
"def find_cliche(self,datapath,filename):\r\n data = self.common.read_csv(datapath,filename)\r\n ##speechtext = data.speechtext.str.replace(r'[^\\w\\s\\,?]','') #Removing all panctuations from speech text\r\n speechtext = data.speechtext.str.lower()\r\n\r\n #Using tf idf to find words or tokens that are less important\r\n vectorizer = TfidfVectorizer(decode_error='replace',stop_words='english',encoding='utf-8')\r\n tfidf = vectorizer.fit_transform(speechtext.apply(lambda x: np.str_(x)))\r\n\r\n terms = vectorizer.get_feature_names()\r\n sums = tfidf.sum(axis=0)\r\n data = []\r\n for col, term in enumerate(terms):\r\n data.append( (term, sums[0,col] ))\r\n\r\n ranking = pd.DataFrame(data, columns=['term','rank'])\r\n cliches = ranking.sort_values('rank', ascending=False).nlargest(25, 'rank')\r\n found_cliches = cliches.term.values\r\n #print(found_cliches)\r\n return found_cliches",
"def known(words: list[str]) -> list[str]:\n return [z for z in list(set(words)) if z in self.words]"
] | [
"0.59737927",
"0.55234385",
"0.545212",
"0.54288036",
"0.5389295",
"0.5367945",
"0.5315806",
"0.53116363",
"0.53098303",
"0.52998656",
"0.52903426",
"0.5223463",
"0.521527",
"0.5212591",
"0.5199618",
"0.5162614",
"0.5160674",
"0.51528895",
"0.5130299",
"0.5123009",
"0.5109176",
"0.5090501",
"0.5086243",
"0.5060003",
"0.5059296",
"0.50484776",
"0.5030889",
"0.5025591",
"0.5011109",
"0.49966523"
] | 0.6638027 | 0 |
Registers a backward hook on the embedding layer, used to save the gradients of the embeddings for use in get_gradients(). When there are multiple inputs (e.g., a passage and question), the hook will be called multiple times; we append all the embedding gradients to a list. | def _register_embedding_gradient_hooks(self, embedding_gradients):
def hook_layers(module, grad_in, grad_out):
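        # register_backward_hook calls this with (module, grad_input, grad_output); grad_out[0] is the gradient w.r.t. the embedding layer's output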
embedding_gradients.append(grad_out[0])
backward_hooks = []
embedding_layer = self.get_embeddings_layer()
backward_hooks.append(embedding_layer.register_backward_hook(hook_layers))
return backward_hooks | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _register_post_backward_hooks(self) -> None:\n if not torch.is_grad_enabled():\n return # don't register grad hooks if grad isn't enabled\n for p in self.full_params:\n if p.requires_grad:\n if hasattr(p, \"_shard_bwd_hook\"):\n continue\n # Register a hook on the first call, empirically, autograd\n # fires it at the end for this param, which makes sense.\n p_tmp = p.expand_as(p) # Get a grad_fn on p_tmp.\n assert p_tmp.grad_fn is not None\n grad_acc = p_tmp.grad_fn.next_functions[0][\n 0] # Gets its GradAccumulation object.\n handle = grad_acc.register_hook(\n functools.partial(self._post_backward_hook, p))\n p._shard_bwd_hook = (grad_acc, handle)",
"def _fp32_register_post_backward_hooks(self):\n\n # Helper function to avoid issues with late binding closures\n def make_post_backward_hook(param):\n def post_backward_hook(*unused):\n self._fp32_optim_grad_sync_needed = True\n if hasattr(param, 'main_grad'):\n with torch.no_grad():\n if param.grad is not None:\n param.main_grad += param.grad\n param.grad = None\n\n return post_backward_hook\n\n # Construct hooks and register with params\n self._fp32_grad_accs = []\n for param in self._fp32_optim_main_params.keys():\n param_tmp = param.expand_as(param)\n grad_acc = param_tmp.grad_fn.next_functions[0][0]\n hook = make_post_backward_hook(param)\n grad_acc.register_hook(hook)\n self._fp32_grad_accs.append(grad_acc)",
"def _register_pre_backward_hooks(self, outputs: Any) -> Any:\n if not torch.is_grad_enabled():\n return outputs # don't register hooks if grad isn't enabled\n\n if self._is_root:\n # This actually means that only root instance has\n # _post_backward_callback_queued defined. Accidentally accessing this field\n # will assert on all other instances, giving us a nice bug checker.\n self._post_backward_callback_queued = False\n\n def _pre_backward_hook(t_grad: torch.Tensor) -> None:\n # try to queue final backward callback only once for root, so\n # that final backward callback is attached to the outer most\n # backward graph task and called after all the backward\n # calls are completed.\n if self._is_root:\n self._queue_wait_for_post_backward()\n\n if self.optimization_barrier_in_backward:\n self._try_adding_to_backward_opt_barrier_lists(t_grad)\n # All-gather full parameters or switching to the full params.\n # Note, ``self._rebuild_full_params`` is idempotent. So in case it is called\n # unnecessarily, it doesn't incur much overhead.\n if self.reshard_after_forward:\n dependency_tensors = []\n if self.optimization_barrier_in_backward:\n # Ensure that backward pass ops of feature gradients, parameter\n # gradient and sharding, and full-param freeing (which are usually\n # performed in previous modules and are registered to\n # self._backward_opt_barrier_tensors in _grad_opt_barrier_hook,\n # _pre_backward_hook, and _post_backward_hook) are finished before\n # rebuilding the full params of this FSDP module.\n dependency_tensors = self._backward_opt_barrier_tensors\n self._rebuild_full_params(\n dependency_tensors=dependency_tensors,\n apply_opt_barrier=self.optimization_barrier_in_backward)\n self._clear_backward_opt_barrier_lists()\n\n # Only run the following once per iteration (i.e. in case\n # it is multiple outputs or multiple forward passes).\n if not self._pre_backward_hook_has_run:\n self._pre_backward_hook_has_run = True\n # Start of a backward pass for the first time in an iteration.\n self.assert_state([TrainingState.IDLE, TrainingState.BACKWARD_PRE])\n # Check p.grad to make sure that it is in the right shape, device, etc.\n for p, p_shard in zip(self.full_params, self.sharded_params):\n if p.grad is not None:\n assert p.grad.device == p_shard.device\n assert p.grad.size() == p_shard._orig_size\n\n # Transition to BACKWARD_PRE state if currently IDLE. 
We can transition from BACKWARD_POST\n # to IDLE when FSDP is within activation checkpointing and called multiple times, due to the\n # extra forward pass for re-computation.\n if self.training_state == TrainingState.IDLE:\n self.training_state = TrainingState.BACKWARD_PRE\n self.assert_state(\n [TrainingState.BACKWARD_PRE, TrainingState.BACKWARD_POST])\n\n if self.optimization_barrier_in_backward:\n self._try_adding_to_backward_opt_barrier_lists(t_grad)\n self.optimization_barrier_op([t_grad])\n t_grad = t_grad.view(t_grad.size()) # a view with barrier applied\n return t_grad\n\n _registered = 0\n\n def _register_hook(t: torch.Tensor) -> torch.Tensor:\n # We don't register the pre_backward hook on the same tensor that has been\n # returned from an inner FSDP, unless it is the first one.\n nonlocal _registered\n assert self._output_pre_backward_hook_registered is not None\n if t.requires_grad and (_registered == 0 or id(t)\n not in self._output_pre_backward_hook_registered):\n t.register_hook(_pre_backward_hook)\n self._output_pre_backward_hook_registered.add(id(t))\n _registered += 1\n return t\n\n # Attach hooks to Tensor outputs.\n outputs = apply_to_tensors(_register_hook, outputs)\n\n return outputs",
"def _get_gradients(self, batch):\n embedding_gradients = []\n original_param_name_to_requires_grad_dict = {}\n \n for param_name, param in self.model.named_parameters():\n original_param_name_to_requires_grad_dict[param_name] = param.requires_grad\n param.requires_grad = True\n \n hooks = self._register_embedding_gradient_hooks(embedding_gradients)\n loss = self.forward_step(batch)\n\n self.model.zero_grad()\n loss.backward()\n\n for hook in hooks:\n hook.remove()\n\n # restore the original requires_grad values of the parameters\n for param_name, param in self.model.named_parameters():\n param.requires_grad = original_param_name_to_requires_grad_dict[param_name]\n\n return embedding_gradients[0]",
"def _register_post_backward_hooks(\n state: _State,\n handles: List[FlatParamHandle],\n) -> None:\n # If there is no gradient computation, then there is no need for\n # post-backward logic\n if not torch.is_grad_enabled():\n return\n for handle in handles:\n flat_param = handle.flat_param\n already_registered = hasattr(flat_param, \"_post_backward_hook_state\")\n if already_registered or not flat_param.requires_grad:\n continue\n # Get the `AccumulateGrad` object\n temp_flat_param = flat_param.expand_as(flat_param)\n p_assert(\n temp_flat_param.grad_fn is not None,\n \"The `grad_fn` is needed to access the `AccumulateGrad` and \"\n \"register the post-backward hook\",\n )\n acc_grad = temp_flat_param.grad_fn.next_functions[0][0]\n hook_handle = acc_grad.register_hook(\n functools.partial(_post_backward_hook, state, handle)\n )\n flat_param._post_backward_hook_state = (acc_grad, hook_handle) # type: ignore[attr-defined]",
"def _register_post_backward_hooks(\n self,\n handles: List[FlatParamHandle],\n ) -> None:\n # If there is no gradient computation, then there is no need for\n # post-backward logic\n if not torch.is_grad_enabled():\n return\n for handle in handles:\n flat_param = handle.flat_param\n already_registered = hasattr(flat_param, \"_post_backward_hook_state\")\n if already_registered or not flat_param.requires_grad:\n continue\n # Get the `AccumulateGrad` object\n temp_flat_param = flat_param.expand_as(flat_param)\n p_assert(\n temp_flat_param.grad_fn is not None,\n \"The `grad_fn` is needed to access the `AccumulateGrad` and \"\n \"register the post-backward hook\"\n )\n acc_grad = temp_flat_param.grad_fn.next_functions[0][0]\n hook_handle = acc_grad.register_hook(\n functools.partial(self._post_backward_hook, handle)\n )\n flat_param._post_backward_hook_state = (acc_grad, hook_handle) # type: ignore[attr-defined]",
"def backward_gradient(\n self, input: np.ndarray, head_gradients: Dict[str, np.ndarray]\n ) -> np.ndarray:\n raise NotImplementedError",
"def _register_pre_backward_hooks(\n state: _State,\n outputs: Any,\n handles: List[FlatParamHandle],\n) -> None:\n # If there is no gradient computation, then there is no need for\n # pre-backward logic\n if not torch.is_grad_enabled():\n return outputs\n if state._is_root:\n state._post_backward_callback_queued = False # only defined on the root\n\n handles_key = tuple(handles)\n if handles_key:\n # Since these handles' `FlatParameter`s participated in a forward, we\n # conservatively assume that they will be used in the backward\n state._needs_pre_backward_unshard[handles_key] = False\n state._ran_pre_backward_hook[handles_key] = False\n\n def _register_hook(t: torch.Tensor) -> torch.Tensor:\n if t.requires_grad:\n t.register_hook(functools.partial(_pre_backward_hook, state, handles))\n state._needs_pre_backward_unshard[handles_key] = True\n return t\n\n return _apply_to_tensors(_register_hook, outputs)",
"def on_backward_end(self, batch):\n if self.updater == \"backward\":\n grads = OrderedDict((name, param.grad.data.cpu(\n )) for name, param in self.model.model.named_parameters() if param.grad is not None)\n try:\n self.update(grads)\n except KeyboardInterrupt:\n raise\n except:\n pass",
"def backward(ctx, grad_output):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n # Retrieve saved tensors and constants\n gamma, ivar, mean, input = ctx.saved_tensors\n eps = ctx.saved_tensors\n\n # Check which inputs need gradients\n input_needs_grad, gamma_needs_grad, beta_needs_grad = ctx.needs_input_grad\n\n # Get the batch size (=N)\n N, _ = grad_output.shape\n\n # reconstruct the input_norm\n input_norm = (input - mean) * ivar\n grand_input_norm = grad_output * gamma\n\n ##### Gradient wrt beta #####\n grad_beta = grad_output.sum(dim=0) if beta_needs_grad else None\n\n #### Gradient wrt gamma ####\n grad_gamma = (input_norm*grad_output).sum(dim=0) if gamma_needs_grad else None\n \n #### Gradient wrt input ####\n term1 = N*grand_input_norm \n term2 = torch.sum(grand_input_norm, dim=0)\n term3 = input_norm*torch.sum(grand_input_norm*input_norm, dim=0)\n grad_input = (1. / N) * ivar * (term1 - term2 - term3) if input_needs_grad else None\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n # return gradients of the three tensor inputs and None for the constant eps\n return grad_input, grad_gamma, grad_beta, None",
"def backward(self, gradient):\n #TODO\n pass",
"def backward(self, gradient):\n #TODO\n pass",
"def backward(ctx, G):\n backend = ctx.backend\n aliases = ctx.aliases\n formula = ctx.formula\n signature = ctx.signature\n sum_index = ctx.sum_index\n args = ctx.saved_tensors # Unwrap the saved variables\n\n # number of arguments (including parameters)\n nvars = 0;\n for sig in signature[1:]:\n nvars += 1\n\n # If formula takes 5 variables (numbered from 0 to 4), then the gradient\n # wrt. the output, G, should be given as a 6-th variable (numbered 5),\n # with the same dim-cat as the formula's output.\n eta = \"Var(\" + str(nvars) + \",\" + str(signature[0][0]) + \",\" + str(signature[0][1]) + \")\"\n grads = [] # list of gradients wrt. args;\n arg_ind = 5 # current arg index (4 since backend, ... are in front of the tensors); \n var_ind = 0 # current Variable index;\n\n for sig in signature[1:]: # Run through the actual parameters, given in *args in the forward.\n if not ctx.needs_input_grad[arg_ind]: # If the current gradient is to be discarded immediatly...\n grads.append(None) # Don't waste time computing it.\n else: # Otherwise, the current gradient is really needed by the user:\n # adding new aliases is waaaaay too dangerous if we want to compute\n # second derivatives, etc. So we make explicit references to Var<ind,dim,cat> instead.\n var = \"Var(\" + str(var_ind) + \",\" + str(sig[0]) + \",\" + str(sig[1]) + \")\" # V\n formula_g = \"Grad(\" + formula + \",\" + var + \",\" + eta + \")\" # Grad<F,V,G>\n args_g = args + (G,) # Don't forget the gradient to backprop !\n \n # N.B.: if I understand PyTorch's doc, we should redefine this function every time we use it?\n genconv = GenericSum().apply\n\n if sig[1] == 2: # we're referring to a parameter, so we'll have to sum both wrt 'i' and 'j'\n sumindex_g = 1 # The first sum will be done wrt 'i'\n signature_g = [ [sig[0],1] ] + signature[1:] + signature[:1]\n grad = genconv(backend, aliases, formula_g, signature_g, sumindex_g, *args_g)\n # Then, sum 'grad' wrt 'j' :\n # I think that \".sum\"'s backward introduces non-contiguous arrays,\n # and is thus non-compatible with GenericSum:\n # grad = grad.sum(0) \n # We replace it with a \"handmade hack\" :\n grad = Variable(torch.ones(1, grad.shape[0]).type_as(grad.data)) @ grad\n grad = grad.view(-1)\n else :\n # sumindex is \"the index that stays in the end\", not \"the one in the sum\"\n # (It's ambiguous, I know... But it's the convention chosen by Joan, which makes\n # sense if we were to expand our model to 3D tensors or whatever.)\n sumindex_g = sig[1] # The sum will be \"eventually indexed just like V\".\n signature_g = [sig] + signature[1:] + signature[:1]\n grad = genconv(backend, aliases, formula_g, signature_g, sumindex_g, *args_g)\n grads.append(grad)\n\n # increment the Variable counts\n arg_ind += 1 ; var_ind += 1 \n\n # Grads wrt. backend, aliases, formula, signature, sum_index, *args\n return (None, None, None, None, None, *grads)",
"def backward(self, gradient):\n raise NotImplementedError()",
"def backward(ctx, grad_output):\n\n # This is a pattern that is very convenient - at the top of backward\n # unpack saved_tensors and initialize all gradients w.r.t. inputs to\n # None. Thanks to the fact that additional trailing Nones are\n # ignored, the return statement is simple even when the function has\n # optional inputs.\n # input, weight, bias = ctx.saved_variables\n\n return grad_output",
"def layer_backward(d_output, cache):\n\n # Unpack cache values\n x, w, z, output = cache\n\n # Compute derivatives (gradients)\n d_x, d_w = None, None\n\n return d_x, d_w",
"def _register_pre_backward_hooks(\n self,\n outputs: Any,\n handles: List[FlatParamHandle],\n ) -> Any:\n # If there is no gradient computation, then there is no need for\n # pre-backward logic\n if not torch.is_grad_enabled():\n return outputs\n\n if self._is_root:\n self._post_backward_callback_queued = False # only defined on the root\n\n handles_key = tuple(handles)\n if handles_key:\n # Since these handles' `FlatParameter`s participated in a forward,\n # we conservatively assume that they will be used in the backward\n self._needs_pre_backward_unshard[handles_key] = False\n self._ran_pre_backward_hook[handles_key] = False\n\n def _pre_backward_hook(_handles: List[FlatParamHandle], *unused: Any) -> None:\n \"\"\"Prepares ``_handles`` 's ``FlatParameter`` s for gradient\n computation.\"\"\"\n _handles_key = tuple(_handles) # avoid shadowing `handles_key`\n # Only run the pre-backward hook once per group of handles involved\n # in the same module forward computation\n if _handles_key and self._ran_pre_backward_hook.get(_handles_key, False):\n return\n\n with torch.autograd.profiler.record_function(\n \"FullyShardedDataParallel._pre_backward_hook\"\n ):\n # Queue the post-backward callback once for the root FSDP\n # instance to attach it to the outermost backward graph task so\n # that it is called after all backward calls complete\n if self._is_root and not self._post_backward_callback_queued:\n self._queue_wait_for_post_backward()\n elif _handles_key:\n self._assert_state([TrainingState_.IDLE])\n self.training_state = TrainingState_.BACKWARD_PRE\n # Queueing the post-backward callback is the only logic that is\n # not per-handle in the pre-backward hook, so we can return\n # early here if there are no handles.\n if not _handles_key:\n return\n for handle in _handles:\n handle._training_state = HandleTrainingState.BACKWARD_PRE\n\n # If the handles have been prefetched, this `_unshard()` simply\n # switches to using the unsharded parameter\n self._unshard(_handles)\n torch.cuda.current_stream().wait_stream(self._streams[\"all_gather\"])\n\n # Set this to `False` to ensure that a mistargeted prefetch\n # does not actually unshard these handles\n self._needs_pre_backward_unshard[_handles_key] = False\n self._prefetch_handles(_handles_key)\n for handle in _handles:\n handle.prepare_gradient()\n self._ran_pre_backward_hook[_handles_key] = True\n\n def _register_hook(t: torch.Tensor) -> torch.Tensor:\n if t.requires_grad:\n t.register_hook(functools.partial(_pre_backward_hook, handles))\n self._needs_pre_backward_unshard[handles_key] = True\n return t\n\n return _apply_to_tensors(_register_hook, outputs)",
"def backward(self):\n gradient = blah\n return gradient",
"def backward(self):\n gradient = blah\n return gradient",
"def backward(self, inputs, gradients, **kwargs):\n grad_relu = inputs > 0\n return gradients * grad_relu",
"def backward(self, gradient: Tensor) -> Tensor:\n self.b_grad = np.sum(gradient, axis=0)\n self.w_grad = self.inputs.T @ gradient\n return gradient @ self.w.T",
"def _post_backward_hook(\n state: _State,\n handle: FlatParamHandle,\n *unused: Any,\n):\n param = handle.flat_param\n param._post_backward_called = True\n with torch.autograd.profiler.record_function(\n \"FullyShardedDataParallel._post_backward_hook\"\n ):\n _assert_in_training_states(state, [TrainingState.FORWARD_BACKWARD])\n state.training_state = TrainingState.FORWARD_BACKWARD\n p_assert(\n handle._training_state == HandleTrainingState.BACKWARD_PRE,\n f\"Expects `BACKWARD_PRE` state but got {handle._training_state}\",\n )\n handle._training_state = HandleTrainingState.BACKWARD_POST\n\n if param.grad is None:\n return\n if param.grad.requires_grad:\n raise RuntimeError(\"FSDP does not support gradients of gradients\")\n\n free_unsharded_flat_param = _should_free_in_backward(state, handle)\n _reshard(state, [handle], [free_unsharded_flat_param])\n\n # TODO: Post-backward prefetching does not support the multiple handles\n # per module case since the post-backward hook runs per handle, not per\n # group of handles.\n handles_key = (handle,)\n _prefetch_handles(state, handles_key)\n\n if not state._sync_gradients:\n return\n\n # Wait for all ops in the current stream (e.g. gradient\n # computation) to finish before reduce-scattering the gradient\n state._streams[\"post_backward\"].wait_stream(torch.cuda.current_stream())\n\n with torch.cuda.stream(state._streams[\"post_backward\"]):\n unsharded_grad_data = param.grad.data\n if state._exec_order_data.is_first_iter: # only check once\n _check_comm_hook(\n state._communication_hook, state._communication_hook_state\n )\n if handle._uses_reduce_mixed_precision and not _low_precision_hook_enabled(\n state\n ):\n # TODO: Use the low precision communication hook directly\n param.grad.data = param.grad.to(state.mixed_precision.reduce_dtype)\n\n if handle.uses_sharded_strategy:\n # We clear `.grad` to permit multiple backwards. 
This avoids a\n # race where the second backward pass computation precedes\n # ahead of the first backward pass reduction, which is possible\n # since the reduction is issued in a separate stream and is\n # async and would result in reducing the wrong gradient.\n unsharded_grad = param.grad.data\n param.grad = None\n p_assert(\n len(unsharded_grad.size()) == 1,\n f\"Expects gradient to be flattened but got {unsharded_grad.size()}\",\n )\n chunks = list(unsharded_grad.chunk(state.world_size))\n numel_to_pad = (\n state.world_size * chunks[0].numel() - unsharded_grad.numel()\n )\n padded_unsharded_grad = F.pad(unsharded_grad, [0, numel_to_pad])\n new_sharded_grad = torch.zeros_like(chunks[0]) # padded\n state._communication_hook(\n state._communication_hook_state,\n padded_unsharded_grad,\n new_sharded_grad,\n )\n _cast_grad_to_param_dtype(state, handle, new_sharded_grad, param)\n\n # Save the sharded gradient in `_saved_grad_shard` to support\n # gradient accumulation -- for multiple backwards, the gradient\n # reductions may happen in arbitrary order\n accumulate_grad = hasattr(param, \"_saved_grad_shard\")\n if accumulate_grad:\n _check_grad_to_accumulate(new_sharded_grad, param._saved_grad_shard)\n param._saved_grad_shard += new_sharded_grad\n else:\n param._saved_grad_shard = new_sharded_grad\n sharded_grad = param._saved_grad_shard\n else:\n state._communication_hook(state._communication_hook_state, param.grad)\n # For `NO_SHARD`, we can keep the low precision gradients by\n # simply omitting the cast altogether\n if not handle._keep_low_precision_grads:\n _cast_grad_to_param_dtype(state, handle, param.grad, param)\n sharded_grad = param.grad.data\n\n if handle._config.offload_params:\n # Offload the gradient to CPU to ensure parameters and\n # gradients are on the same device as required by the optimizer\n param._cpu_grad.copy_( # type: ignore[attr-defined]\n sharded_grad.detach(), non_blocking=True\n ) # synchronized in the post-backward callback\n # Since the sharded gradient is produced in the post-backward\n # stream and consumed later in the computation stream, inform\n # the caching allocator\n sharded_grad.data.record_stream(torch.cuda.current_stream())\n\n # Since the unsharded gradient is produced in the computation\n # stream and consumed in the post-backward stream, inform the\n # caching allocator (before it goes out of scope)\n unsharded_grad_data.record_stream(state._streams[\"post_backward\"])\n\n if handle._use_orig_params:\n # Since the handle's `FlatParameter` completed its gradient\n # computation, we should reset the gradient noneness mask\n handle._reset_is_grad_none()\n # Delay using sharded gradient views until after the\n # reduce-scatter instead of immediately after resharding\n handle._use_sharded_grad_views()",
"def backward(ctx, grad_output):\n diff, = ctx.saved_tensors\n grad_input = grad_output.clone()\n grad_input = grad_input + diff\n return grad_input",
"def _wait_for_post_backward(self) -> None:\n assert self._is_root\n # Check if the root module has params and if any of them has\n # the `requires_grad` field set. If `requires_grad=False` for\n # all the params, the post_backward hook will not fire and the\n # state will remain in `TrainingState.BACKWARD_PRE`.\n if any([p.requires_grad for p in self.full_params]):\n self.assert_state(TrainingState.BACKWARD_POST)\n else:\n self.assert_state(TrainingState.BACKWARD_PRE)\n\n # A backward pass is done, clean up below.\n def _finalize_parameters(fsdp_module: XlaFullyShardedDataParallel) -> None:\n \"\"\"Helper used below on all fsdp modules.\"\"\"\n for p in fsdp_module.full_params:\n if not p.requires_grad:\n continue\n if hasattr(p, \"_shard_bwd_hook\"):\n assert len(p._shard_bwd_hook) == 2, len(p._shard_bwd_hook)\n p._shard_bwd_hook[1].remove()\n delattr(p, \"_shard_bwd_hook\")\n\n # Update root and nested FSDP's hooks and flags.\n for m in self.modules(): # includes self\n if isinstance(m, XlaFullyShardedDataParallel):\n _finalize_parameters(m)\n if not m._pre_backward_hook_has_run:\n m.assert_state(TrainingState.IDLE)\n # The module won't trigger post_backward_hook, so we free the\n # full params here.\n m._free_full_params(\n m.full_params,\n apply_opt_barrier=self.optimization_barrier_in_backward)\n elif any(p.requires_grad for p in m.parameters()):\n # Check if the module has params and if any of them has\n # the `requires_grad` field set. If `requires_grad=False` for\n # all the params, the post_backward hook will not fire and the\n # state will remain in `TrainingState.BACKWARD_PRE`.\n if any([p.requires_grad for p in m.full_params]):\n m.assert_state(TrainingState.BACKWARD_POST)\n else:\n m.assert_state(TrainingState.BACKWARD_PRE)\n else:\n # When `m` and its children has no params or has params but\n # none with `requires_grad==True`, there are two cases:\n # 1. output tensors are `requires_grad==True`. In this case,\n # pre-backward hook is still registered, so it is in BACKWARD_PRE state.\n # 2. output tensors are `requires_grad==False`. In this case,\n # pre-backward hook is not registered, so it is in IDLE state.\n m.assert_state([TrainingState.BACKWARD_PRE, TrainingState.IDLE])\n\n m.training_state = TrainingState.IDLE\n m._pre_backward_hook_has_run = False\n if m._is_root:\n # reset this flag for cases like \"one forward pass + multiple backward passes\"\n self._post_backward_callback_queued = False\n # clear this list for next iteration\n assert self._output_pre_backward_hook_registered is not None\n self._output_pre_backward_hook_registered.clear()\n if self.optimization_barrier_in_backward:\n # Ensure that backward pass ops of feature gradients, parameter\n # gradient and sharding, and full-param freeing (which are usually\n # performed in previous modules and are registered to\n # self._backward_opt_barrier_tensors in _grad_opt_barrier_hook,\n # _pre_backward_hook, and _post_backward_hook) are finished before\n # accessing the sharded gradients of this FSDP module.\n params_with_grad = [\n p for p in self._all_sharded_params if p.grad is not None\n ]\n grad_data = [p.grad for p in params_with_grad]\n dependency_tensors = params_with_grad + grad_data\n dependency_tensors.extend(self._backward_opt_barrier_tensors)\n self.optimization_barrier_op(dependency_tensors)\n self._clear_backward_opt_barrier_lists()\n\n if self.mark_step_on_finalization:\n # Forcing an execution at the end of backward pass to avoid any XLA compiler\n # fusion between backward and optimizer (e.g. 
AdamW and SGD) step.\n # Here `xm.mark_step` is only called once for the entire backward pass and\n # should therefore only moderately increase the execution time.\n # It may help prevent undesired fusion in backward pass and save more memory.\n if self._debug_print:\n xm.master_print(\n f\"mark_step called in FSDP _wait_for_post_backward (_debug_msg: {self._debug_msg})\",\n flush=True,\n )\n xm.mark_step()",
"def word_embedding_backward(dout, cache):\n dW = None\n ##############################################################################\n # TODO: Implement the backward pass for word embeddings. #\n # #\n # HINT: Look up the function np.add.at #\n ##############################################################################\n x, W = cache\n # create a copy since add.at changes the matrix\n W_new = W.copy()\n # it is just adding the derivates specified in dout at proper index\n # x gives the indices . dout gives the derivates that needs to be added.\n np.add.at(W_new, x, dout)\n dW = W_new - W\n ##############################################################################\n # END OF YOUR CODE #\n ##############################################################################\n return dW",
"def backward(ctx, grad_L):\n A, T = ctx.saved_tensors\n\n grad_A = None\n grad_T = None\n\n B = A.shape[0]\n\n # We only need to compute gradients for tensors that are flagged to\n # require gradients!\n if ctx.needs_input_grad[0]:\n grad_A = (A - T) / B\n\n if ctx.needs_input_grad[1]:\n grad_T = (T - A) / B\n\n return grad_A, grad_T",
"def backward(self, inputs, grad_loss_input):\n raise NotImplementedError",
"def forward_backward(self, data_batch):\n self.forward(data_batch, is_train=True)\n self.backward()\n if self.use_l2norm_grad_clip:\n # 2-Norm Grad Clip\n self.l2norm_grad_clip()",
"def apply_gradients(self,\n grads_and_vars,\n global_step=None,\n name=None,\n decay_var_list=None):\n self._decay_var_list = set(decay_var_list) if decay_var_list else False\n return super(DecoupledWeightDecayExtension, self).apply_gradients(\n grads_and_vars, global_step=global_step, name=name)",
"def backward(self, inGradient, lr=0.001): # batchSize = 1\n wGradient = np.dot(inGradient.T, self.data)\n bGradient = np.sum(inGradient, axis=0)\n outGradient = np.dot(inGradient, self.weights)\n\n self.weights = self.weights - lr * wGradient\n self.bias = self.bias - lr * bGradient\n self.wGradient = wGradient\n self.bGradient = bGradient\n\n #print \"weight gradient \", wGradient\n #print \"bias gradient \", bGradient\n\n return outGradient"
] | [
"0.744036",
"0.7207515",
"0.68466437",
"0.6619593",
"0.6545222",
"0.65270805",
"0.6505459",
"0.64318466",
"0.6385007",
"0.63628876",
"0.63604456",
"0.63604456",
"0.62412906",
"0.6231469",
"0.62253386",
"0.61755383",
"0.609307",
"0.60908824",
"0.60908824",
"0.60785884",
"0.6035776",
"0.59760696",
"0.59733164",
"0.5969636",
"0.5931047",
"0.5928084",
"0.59047455",
"0.5880499",
"0.5870969",
"0.5865129"
] | 0.8470942 | 0 |
some tokenizers don't have 'eos_token' and 'bos_token' attributes. Thus, we need some trick to get them. | def special_tokens(self):
if self.tokenizer.bos_token is None or self.tokenizer.eos_token is None:
special_tokens = self.tokenizer.build_inputs_with_special_tokens([])
special_tokens_ids = self.tokenizer.convert_ids_to_tokens(special_tokens)
self.tokenizer.bos_token, self.tokenizer.eos_token = special_tokens_ids
special_tokens = self.tokenizer.eos_token, self.tokenizer.bos_token
return special_tokens | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def synth_tokens(self):\n if self.lliagraph:\n return self.lliagraph.synth_tokens.items()\n else:\n return []",
"def parse(self, tokenizer):\n pass",
"def tokens():\n pass",
"def get_tokens(self, document):\n raise NotImplementedError()",
"def get_tokens(self):\r\n return self.token_set",
"def get_tokens(data_clean):\n #sentence tokenization\n data_sent = sent_tokenize(data_clean)\n #tokenizer\n data_tokenized_punc = [word for sent in data_sent for word in nltk.word_tokenize(sent)]\n data_word = [word.lower() for word in data_tokenized_punc if word.isalpha()]\n\n return data_word, data_sent",
"def getTokens(self):\n return self.__token",
"def bos_token(self):\r\n if self._bos_token is None:\r\n logger.error(\"Using bos_token, but it is not set yet.\")\r\n return self._bos_token",
"def __get_token_data__(self):\n raise Exception(\"Implement me!\")",
"def __init__(self):\n self.tokens = []",
"def eos_token(self):\r\n if self._eos_token is None:\r\n logger.error(\"Using eos_token, but it is not set yet.\")\r\n return self._eos_token",
"def getTokens(self):\n # NOTE: seems to be used by the evitaNominalTrainer only\n tokenList = []\n for chunkOrToken in self.dtrs:\n if chunkOrToken.isToken():\n tokenList += [chunkOrToken]\n elif chunkOrToken.isChunk():\n tokenList += chunkOrToken.dtrs\n else:\n logger.warn(\"Sentence element that is not a chunk or token\")\n return tokenList",
"def _gettoken(c,chars,knownsigils):\n verbose = False\n token = None\n if (c!= \"end\"):\n toktext = []\n matches = knownsigils[c][0]\n toktype = knownsigils[c][1]\n if verbose: print(\"BEF toktype:\",toktype,\" matches:\",matches)\n while (True):\n c = next(chars, \"end\")\n if verbose: print(\"c->\",c)\n if c in matches:\n toktext.append(c)\n else:\n break\n if verbose: print(\"AFT toktype:\",toktype,\" toktext:\",toktext)\n token = (''.join(toktext), toktype)\n return (c,token)",
"def init_tokens(self):\n raise NotImplementedError('Abstract method.')",
"def token(self) -> str:",
"def tokens(self):\n return self.__tokens",
"def tokenize(G, w):\n if not w:\n return [G.EOF]\n\n w = normalize(w)\n w = w[:-1].split(' ')\n \n f = G.symbDict\n\n tokens = []\n for token in w:\n if f.get(token) and f[token].IsTerminal:\n tokens.append(f[token])\n else:\n return \"token no definido: \" + token\n tokens.append(G.EOF)\n return tokens",
"def token_key(token):\n morphotagged = analysis(token).get('raw')\n lemma_pos = (analysis(token).get('lemma'), analysis(token).get('partOfSpeech'))\n return morphotagged or lemma_pos",
"def tokens(self):\n tokens = [k for k in self.tok2ind.keys()\n if k not in {'<NULL>', '<UNK>'}]\n return tokens",
"def get_tokens(self):\n\t\treturn self.get_starttokens() + self.get_endtokens()",
"def tokens(self):\n return self._tokens",
"def tokens(self):\n return self._tokens",
"def tokens(self):\n return self._tokens",
"def tokens(self):\r\n return self.iter_tokens(self._blob)",
"def get_token(self):\n key = self.kwargs.get(self.token_field_name, '').strip()\n if key in EMPTY_VALUES:\n key = self.request.GET.get(self.token_field_name, '').strip()\n if key in EMPTY_VALUES:\n key = self.request.POST.get(self.token_field_name, '').strip()\n if key in EMPTY_VALUES:\n key = None\n return key",
"def test_tokenization():\n X = Tokenizer().transform([[\"A test\"]])\n assert X[\"corpus\"][0] == [\"A\", \"test\"]",
"def get_tokenizer_and_model(model_name: str):\r\n tokenizer = AutoTokenizer.from_pretrained(model_name)\r\n model = AutoModel.from_pretrained(model_name)\r\n model.output_hidden_states = True\r\n return tokenizer, model",
"def token(self):\n print(\"getter of token called\")\n return self._token",
"def _next_tokens(self, head):\n state = head.state\n input_str = self.input_str\n position = head.position\n actions = state.actions\n in_len = len(input_str)\n tokens = []\n\n # add special STOP token if they are applicable\n if STOP in actions:\n if not self.consume_input \\\n or (self.consume_input and position == in_len):\n tokens.append(STOP_token)\n\n if position < in_len:\n # Get tokens by trying recognizers - but only if we are not at\n # the end, because token cannot be empty\n if self.custom_token_recognition:\n def get_tokens():\n return self._token_recognition(head)\n\n custom_tokens = self.custom_token_recognition(\n head, get_tokens,\n )\n if custom_tokens is not None:\n tokens.extend(custom_tokens)\n else:\n tokens.extend(self._token_recognition(head))\n\n # do lexical disambiguation if it is enabled\n if self.lexical_disambiguation:\n tokens = self._lexical_disambiguation(tokens)\n\n return tokens",
"def tokens(self):\n\t\tlabels_and_synonyms = list(itertools.chain.from_iterable(list(self.term_to_tokens.values())))\n\t\ttokens = set(list(itertools.chain.from_iterable([word_tokenize(x) for x in labels_and_synonyms])))\n\t\treturn(list(tokens))"
] | [
"0.6021182",
"0.5951054",
"0.58588576",
"0.5789484",
"0.5745784",
"0.56824887",
"0.5654901",
"0.5648043",
"0.5638188",
"0.5631488",
"0.5630968",
"0.56133425",
"0.5593994",
"0.5592292",
"0.5575906",
"0.55579054",
"0.5544896",
"0.549606",
"0.54886556",
"0.54866934",
"0.5471206",
"0.5471206",
"0.5471206",
"0.546802",
"0.54580086",
"0.54476875",
"0.5431728",
"0.54306537",
"0.5413535",
"0.5402866"
] | 0.62158376 | 0 |
Compute the euclidean distance between each word in the vocab and each word in the source. | def _pairwise_distance(self, src_embeds, vocab_embeds, squared=False):
# compute squared norms so we can avoid materializing all pairwise difference vectors
vocab_sq_norm = vocab_embeds.norm(p=2, dim=-1) ** 2
src_sq_norm = src_embeds.norm(p=2, dim=-1) ** 2
# dot product
dot_product = self._pairwise_dot_product(src_embeds, vocab_embeds)
# reshape for broadcasting
vocab_sq_norm = vocab_sq_norm.unsqueeze(0).unsqueeze(0) # 1, 1, vocab size
src_sq_norm = src_sq_norm.unsqueeze(2) # batch, seq length, 1
# compute squared difference
sq_norm = vocab_sq_norm + src_sq_norm - 2 * dot_product
if squared:
return sq_norm
else:
# relu + epsilon for numerical stability
sq_norm = F.relu(sq_norm) + 1e-20
# take the square root
return sq_norm.sqrt() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist",
"def get_distance_metrics(source_embeddings, target_embeddings):\n cosine_avg, euclidean_avg = 0.0, 0.0\n for i in range(len(source_embeddings)):\n cosine_avg += cosine(source_embeddings[i], target_embeddings[i])\n euclidean_avg += euclidean(source_embeddings[i], target_embeddings[i])\n return (cosine_avg / len(source_embeddings)), (euclidean_avg / len(source_embeddings))",
"def distance(self, word1, word2):\n\n return scipy.spatial.distance.cosine(self.vectors.get(word1), self.vectors.get(word2))",
"def by_distance_vectors(self, string_1, string_2):\n string_1 = self.kywrds.by_frequency(string_1)\n string_2 = self.kywrds.by_frequency(string_2)\n model = self.doc2vec_model[0]\n doc_vec_1 = model.infer_vector(string_1)\n doc_vec_2 = model.infer_vector(string_2)\n return spatial.distance.cosine(doc_vec_1, doc_vec_2)",
"def diff(self, word1, word2):\n v = self._vecs[self._index[word1]] - self._vecs[self._index[word2]]\n return v / np.linalg.norm(v)",
"def euclidean_distance(x1: np.ndarray, x2: np.ndarray) -> float:\n return np.sqrt(np.square(x1 - x2).sum())",
"def euclidean_distance(s1,s2): \n tmpsum = 0\n \n for index,value in enumerate(s1):\n tmpsum += (s1[index]-s2[index])**2\n \n return math.sqrt(tmpsum)",
"def wordMoversDistance(model, document1, document2):\n # If pyemd C extension is available, import it.\n # If pyemd is attempted to be used, but isn't installed, ImportError will be raised in wmdistance\n from pyemd import emd\n # Remove out-of-vocabulary words.\n len_pre_oov1 = len(document1)\n len_pre_oov2 = len(document2)\n document1 = [token for token in document1 if token in model]\n document2 = [token for token in document2 if token in model]\n diff1 = len_pre_oov1 - len(document1)\n diff2 = len_pre_oov2 - len(document2)\n if diff1 > 0 or diff2 > 0:\n print('Remove ' + str(diff1) + ' and ' + str(diff2) + ' OOV words from document 1 and 2 ('\n 'respectively).')\n return float('inf')\n\n if not document1 or not document2:\n print(\"At least one of the documents had no words that were in the vocabulary. Aborting (returning \"\n \"inf).\")\n return float('inf')\n\n dictionary = Dictionary(documents=[document1, document2])\n vocab_len = len(dictionary)\n\n if vocab_len == 1:\n # Both documents are composed by a single unique token\n return 0.0\n\n # Sets for faster look-up.\n docset1 = set(document1)\n docset2 = set(document2)\n\n # Compute distance matrix.\n distance_matrix = zeros((vocab_len, vocab_len), dtype=double)\n for i, t1 in dictionary.items():\n if t1 not in docset1:\n continue\n\n for j, t2 in dictionary.items():\n if t2 not in docset2 or distance_matrix[i, j] != 0.0:\n continue\n\n # Compute Euclidean distance between word vectors.\n distance_matrix[i, j] = distance_matrix[j, i] = sqrt(np_sum((model[t1] - model[t2]) ** 2))\n\n if np_sum(distance_matrix) == 0.0:\n # `emd` gets stuck if the distance matrix contains only zeros.\n print('The distance matrix is all zeros. Aborting (returning inf).')\n return float('inf')\n\n def nbow(document):\n d = zeros(vocab_len, dtype=double)\n nbow = dictionary.doc2bow(document) # Word frequencies.\n doc_len = len(document)\n for idx, freq in nbow:\n d[idx] = freq / float(doc_len) # Normalized word frequencies.\n return d\n\n # Compute nBOW representation of documents.\n d1 = nbow(document1)\n d2 = nbow(document2)\n\n # Compute WMD.\n return emd(d1, d2, distance_matrix)",
"def euclidean(x,y):\n\tassert (isinstance(x, BayesNet) and isinstance(y, BayesNet)), 'Must pass in BayesNet objects.'\n\tassert (x==y), 'Passed-in BayesNet objects are not structurally equal.'\n\n\tdistance = np.sum( np.sqrt( ( x.flat_cpt() - y.flat_cpt() )**2 ) )\n\treturn distance",
"def euclidean_distance(x, y):\n return sqrt(sum(pow(a - b, 2) for a, b in zip(x, y)))",
"def euclidean_metric(x, y):\n if len(x) != len(y):\n raise ValueError(\"Incompatible dimensions.\")\n return np.linalg.norm(x - y)\n \n # Or a slightly longer way:\n return np.sqrt(np.sum(np.subtract(x, y)**2))\n # Or the longest/worst way:\n total = 0\n for i in xrange(len(x)):\n term = x[i] - y[i]\n term = term**2\n total += term\n total = np.sqrt(total)\n return total",
"def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.square(np.subtract(x1, x2))))",
"def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)",
"def _distance_from_weights(self, data):\n input_data = array(data)\n weights_flat = self._weights.reshape(-1, self._weights.shape[2])\n input_data_sq = power(input_data, 2).sum(axis=1, keepdims=True)\n weights_flat_sq = power(weights_flat, 2).sum(axis=1, keepdims=True)\n cross_term = dot(input_data, weights_flat.T)\n return sqrt(-2 * cross_term + input_data_sq + weights_flat_sq.T)",
"def sentence_distance(sentence_a, sentence_b):\n \n sent_a = np.sum([projections[word_ids.get(word, 0)] \n if word in word_ids else [0] \n for word in sentence_a+bigrams(sentence_a)+trigrams(sentence_a)], axis=0)\n sent_b = np.sum([projections[word_ids.get(word, 0)] \n if word in word_ids else [0] \n for word in sentence_b+bigrams(sentence_b)+trigrams(sentence_b)], axis=0)\n \n \n return float(cosine(sent_a, sent_b))",
"def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n\n distance = np.linalg.norm(x - y)\n\n return distance",
"def compute_euclidean_dist(vec1, vec2):\r\n assert len(vec1) == len(vec2)\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.sqrt(np.sum(np.square(vec2 - vec1)))",
"def distance(dest_words, page_words):\n dest_hist = histogram(dest_words)\n page_hist = histogram(page_words)\n\n\n # positive difference means the word appears more on the destination\n difference_hist = {}\n for word in dest_hist:\n difference_hist[word] = dest_hist[word] - page_hist.get(word, 0.0)\n\n dist = 0.0\n for word in difference_hist:\n dist += abs(difference_hist[word])\n return dist",
"def word_analogy(self):\n data = open(\"data/word_analogy_subset.en.ar.txt\").read().split('\\n')\n data = [x for x in data if len(x.split()) == 4]\n cnt = 0\n keys = list(self.embeddings_index.keys())\n vectors = np.array(list(self.embeddings_index.values()))\n norms = np.linalg.norm(vectors, axis=1)\n for i in data:\n i = self.preprocessor(i).split()\n try:\n v = self.embeddings_index[i[0]] - self.embeddings_index[i[1]] + self.embeddings_index[i[2]]\n except:\n continue\n unit = v / np.linalg.norm(v)\n dists = np.dot(vectors, unit) / norms\n best = np.argpartition(-dists, 10)[:10 + 1]\n best = best.take(np.argsort((-dists).take(best)))\n result = [(keys[sim], float(dists[sim]))\n for sim in best]\n sbv = result[:10]\n for j in sbv:\n if j[0] == i[3]:\n cnt += 1\n return cnt/ len(data)",
"def euclidean_distance(x: np.ndarray, y: np.ndarray) -> float:\n distance_vector: np.ndarray = x - y\n distance = compute_norm(distance_vector)\n return distance",
"def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)",
"def test_distances(self):\n distances = self.vectors.distances('dog.n.01', ['mammal.n.01', 'dog.n.01'])\n self.assertTrue(np.allclose(distances, [4.5278745, 0]))\n\n distances = self.vectors.distances('dog.n.01')\n self.assertEqual(len(distances), len(self.vectors.vocab))\n self.assertTrue(np.allclose(distances[-1], 10.04756))",
"def euclidean_distance(x1, x2):\n return (x2[0] - x1[0])**2 + (x2[1] - x1[1])**2",
"def euclidean_distance(x1, x2):\n return np.sqrt(np.sum(np.power(x1 - x2, 2)))",
"def calcEuclideanDistance(d1, d2):\n #initiate empty list\n result = []\n #for each index in the list, each position in both list minus each other\n #and to the power of two. Add this in the result list\n for idx in range(len(d1)):\n result.append((d1[idx]-d2[idx])**2)\n\n #Return the square of the sum of all values in the result list\n return math.sqrt(sum(result))",
"def hellinger_distance(doca, docb, axis=1):\n return np.sum((doca**.5 - docb**.5)**2, axis=axis)",
"def EuclideanDistanceSq( self, a, b ):\n if not (type(a) == list or type(a) == Vector):\n a = [a]\n if not (type(b) == list or type(a) == Vector):\n b = [b]\n assert len(a) == len(b)\n sqDist = 0\n for x,y in zip(a,b):\n sqDist += (x-y)**2\n return sqDist",
"def _distorted_distance(self):\n distance = 0\n for i, pixel in enumerate(self.training_set):\n distance += self._euclid_distance(\n pixel, self.clusters[self.labels[i]], axis=0)\n return distance",
"def euclidean_distance(x1, x2):\n\tdistance = 0\n\t# Squared distance between each coordinate\n\tfor i in range(len(x1)):\n\t\tdistance += pow((x1[i], x2[i]), 2)\n\treturn math.sqrt(distance)",
"def euclidean_distance(a, b):\n return np.linalg.norm(a - b)"
] | [
"0.7123041",
"0.6798625",
"0.6672034",
"0.64250857",
"0.6401553",
"0.6378859",
"0.63724816",
"0.6371111",
"0.6301999",
"0.6265582",
"0.6255312",
"0.62346095",
"0.62243164",
"0.62200135",
"0.62105745",
"0.6194787",
"0.6186329",
"0.6180648",
"0.6174867",
"0.61723065",
"0.61706024",
"0.61403537",
"0.60908735",
"0.6083031",
"0.60731995",
"0.60549587",
"0.6042237",
"0.60385555",
"0.6035777",
"0.6027672"
] | 0.747233 | 0 |
If TASK_USE_PATH is set rely on PATH to look for task binaries. Otherwise ../src/ is used by default. | def task_binary_location(cmd="task"):
return binary_location(cmd, TASK_USE_PATH) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_task_dir(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tasks')",
"def test_taskmod_no_taskfile(modpath):\n sys.meta_path.append(TaskImporter())\n task = import_module(modpath)\n assert modpath in sys.modules\n assert sys.modules[modpath] is task\n assert task.__taskmodules__ == []",
"def get_celery_path():\n\n return get_executable_path('celery')",
"def TaskRelativeName(cls, task):\n if not task: return None\n return os.path.relpath(cls.TaskNormalizedName(task),\n PipelineConfig.Instance().pipeline_base_dir())",
"def task(path, **kwargs):\n\n # Get model configuration\n config = None\n if isinstance(path, (list, tuple)) and hasattr(path[0], \"config\"):\n config = path[0].config\n elif isinstance(path, str):\n config = AutoConfig.from_pretrained(path, **kwargs)\n\n # Attempt to resolve task using configuration\n task = None\n if config:\n architecture = config.architectures[0] if config.architectures else None\n if architecture:\n if any(x for x in [\"LMHead\", \"CausalLM\"] if x in architecture):\n task = \"language-generation\"\n elif \"QuestionAnswering\" in architecture:\n task = \"question-answering\"\n elif \"ConditionalGeneration\" in architecture:\n task = \"sequence-sequence\"\n\n return task",
"def find_taskfile(self):\n filename = self.cmdline.file\n curdir = self.cmdline.dir\n\n if \"load\" in self.cmdline.verbose:\n self.env.errorln(\"Taskrun search directory: {0}\".format(curdir))\n self.env.errorln(\"Taskrun search filename: {0}\".format(filename))\n self.env.errorln(\"Taskrun walk path: {0}\".format(str(self.cmdline.walk)))\n\n self.taskfile = None\n while True:\n taskfile = os.path.join(curdir, filename)\n if os.path.isfile(taskfile):\n if \"load\" in self.cmdline.verbose:\n self.env.errorln(\"Task file found: {0}\".format(taskfile))\n self.taskfile = taskfile\n return\n\n if not self.cmdline.walk:\n return\n\n (head, _) = os.path.split(curdir)\n if head and head != curdir:\n curdir = head\n else:\n break",
"def test_taskmod_taskfiles_only(monkeypatch, modpath):\n\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n task = import_module(modpath)\n\n assert modpath in sys.modules\n assert sys.modules[modpath] is task\n assert task.__taskmodules__ == pypath\n for n in names:\n assert hasattr(task, n)\n assert getattr(task, n).TEST == '{}.{}'.format(modpath, n)",
"def get_python():\n return path.join(TaskCreator.bin_dir, \"python\")",
"def prepare_taskfile(taskfile):\n path = os.path.dirname(taskfile)\n taskmodulename = os.path.splitext(os.path.basename(taskfile))[0]\n logging.info(\"Loading task file %s from %s\", taskmodulename, path)\n fp, pathname, description = imp.find_module(taskmodulename, [path])\n try:\n return imp.load_module(taskmodulename, fp, pathname, description)\n finally:\n if fp: \n fp.close()",
"def test_findtasks_none(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n # monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n # FakeModuleWithTasks)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfile = 'a_0.py'\n\n sys.meta_path.append(TaskImporter(taskfile))\n taskmod = import_module(modpath)\n\n assert hasattr(taskmod, '__tasks__')\n assert taskmod.__tasks__ == []",
"def discover_tasks(app):\n\n task_arguments.add_argument(\n \"preload-defaults-from-site\",\n type=str,\n required=False,\n default=\"\",\n choices=preload_defaults_from_site_choices,\n help=\"Select site within environment to load defaults from, argument format is <environment_name>/<site_name>\",\n )\n\n for tasks_base_dir in app.config[\"JINJAMATOR_TASKS_BASE_DIRECTORIES\"]:\n for file_ext in [\"py\", \"j2\"]:\n for tasklet_dir in glob.glob(\n os.path.join(tasks_base_dir, \"**\", f\"*.{file_ext}\"), recursive=True\n ):\n task_dir = os.path.dirname(tasklet_dir)\n append = True\n for dir_chunk in task_dir.replace(tasks_base_dir, \"\").split(\n os.path.sep\n ): # filter out hidden directories\n if dir_chunk.startswith(\".\") or dir_chunk in [\"__pycache__\"]:\n append = False\n break\n\n dir_name = task_dir.replace(tasks_base_dir, \"\")[1:]\n if append and dir_name not in available_tasks_by_path:\n\n task_id = xxhash.xxh64(task_dir).hexdigest()\n\n task_info = {\n \"id\": task_id,\n \"path\": dir_name,\n \"base_dir\": tasks_base_dir,\n \"description\": get_section_from_task_doc(task_dir)\n or \"no description\",\n }\n available_tasks_by_path[dir_name] = task_info\n try:\n task = JinjamatorTask()\n log.debug(app.config[\"JINJAMATOR_FULL_CONFIGURATION\"])\n task._configuration.merge_dict(\n app.config[\"JINJAMATOR_FULL_CONFIGURATION\"]\n )\n\n task.load(\n os.path.join(task_info[\"base_dir\"], task_info[\"path\"])\n )\n with app.app_context():\n data = json.loads(\n jsonify(\n task.get_jsonform_schema()[\"schema\"]\n ).data.decode(\"utf-8\")\n )\n task_models[task_info[\"path\"]] = api.schema_model(task_id, data)\n del task\n\n log.info(f\"registered model for task {task_dir}\")\n\n dynamic_role_name = f\"task_{dir_name}\"\n new_role = JinjamatorRole(name=dynamic_role_name)\n\n with app.app_context():\n db.session.add(new_role)\n try:\n db.session.commit()\n except Exception:\n pass\n\n @ns.route(f\"/{task_info['path']}\", endpoint=task_info[\"path\"])\n class APIJinjamatorTask(Resource):\n @api.doc(\n f\"get_task_{task_info['path'].replace(os.path.sep,'_')}_schema\"\n )\n @api.expect(task_arguments)\n @api.doc(\n params={\n \"Authorization\": {\n \"in\": \"header\",\n \"description\": \"A valid access token\",\n }\n }\n )\n @require_role(\n role=or_(\n User.roles.any(\n JinjamatorRole.name == dynamic_role_name\n ),\n User.roles.any(JinjamatorRole.name == \"tasks_all\"),\n )\n )\n def get(self):\n \"\"\"\n Returns the json-schema or the whole alpacajs configuration data for the task\n \"\"\"\n\n args = task_arguments.parse_args(request)\n schema_type = args.get(\"schema-type\", \"full\")\n try:\n preload_data = json.loads(\n args.get(\"preload-data\", \"{}\")\n )\n except TypeError:\n preload_data = {}\n preload_data = remove_redacted(preload_data)[1]\n environment_site = args.get(\n \"preload-defaults-from-site\"\n )\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n inner_task = JinjamatorTask()\n\n inner_task._configuration.merge_dict(\n app.config[\"JINJAMATOR_FULL_CONFIGURATION\"]\n )\n inner_task.configuration.merge_dict(preload_data)\n\n inner_task.load(relative_task_path)\n\n if environment_site not in [None, \"None\", \"\"]:\n inner_task._configuration[\n \"jinjamator_site_path\"\n ] = site_path_by_name.get(environment_site)\n inner_task._configuration[\n \"jinjamator_site_name\"\n ] = environment_site\n env_name, site_name = environment_site.split(\"/\")\n roles = [\n role[\"name\"]\n for role in g._user.get(\"roles\", [])\n ]\n if (\n 
f\"environment_{env_name}|site_{site_name}\"\n in roles\n or f\"environments_all\" in roles\n or f\"administrator\" in roles\n ):\n inner_task.configuration.merge_yaml(\n \"{}/defaults.yaml\".format(\n site_path_by_name.get(environment_site)\n )\n )\n else:\n abort(\n 403,\n f\"User neither has no role environment_{env_name}|site_{site_name} nor environments_all nor administrator. Access denied.\",\n )\n\n full_schema = inner_task.get_jsonform_schema()\n\n if schema_type in [\"\", \"full\"]:\n response = jsonify(full_schema)\n elif schema_type in [\"schema\"]:\n response = jsonify(full_schema.get(\"schema\", {}))\n elif schema_type in [\"data\"]:\n response = jsonify(full_schema.get(\"data\", {}))\n elif schema_type in [\"options\"]:\n response = jsonify(full_schema.get(\"options\", {}))\n elif schema_type in [\"view\"]:\n response = jsonify(full_schema.get(\"view\", {}))\n del inner_task\n return response\n\n @api.doc(\n f\"create_task_instance_for_{task_info['path'].replace(os.path.sep,'_')}\"\n )\n @api.expect(task_models[task_info[\"path\"]], validate=False)\n @api.doc(\n params={\n \"Authorization\": {\n \"in\": \"header\",\n \"description\": \"A valid access token\",\n }\n }\n )\n @require_role(\n role=or_(\n User.roles.any(\n JinjamatorRole.name == dynamic_role_name\n ),\n User.roles.any(JinjamatorRole.name == \"tasks_all\"),\n )\n )\n def post(self):\n \"\"\"\n Creates an instance of the task and returns the job_id\n \"\"\"\n\n from jinjamator.task.celery import run_jinjamator_task\n from jinjamator.daemon.database import db\n\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n data = request.get_json()\n job_id = str(uuid.uuid4())\n user_id = g._user[\"id\"]\n\n job = run_jinjamator_task.apply_async(\n [\n relative_task_path,\n data,\n data.get(\"output_plugin\", \"console\"),\n user_id,\n ],\n task_id=job_id,\n created_by_user_id=user_id,\n )\n\n db_job = list(\n db.session.query(DB_Job).filter(\n DB_Job.task_id == job.id\n )\n )\n db_job = db_job and db_job[0]\n if not db_job:\n db_job = DB_Job(job.id)\n db_job.status = \"SCHEDULED\"\n db_job.configuration = data\n db_job.jinjamator_task = relative_task_path\n db_job.created_by_user_id = user_id\n db.session.add(db_job)\n db.session.flush()\n db.session.commit()\n\n return jsonify({\"job_id\": job.id})\n\n if task_info[\"description\"]:\n post.__doc__ += task_info[\"description\"]\n get.__doc__ += task_info[\"description\"]\n\n except Exception as e:\n import traceback\n\n log.error(\n f\"unable to register {task_dir}: {e} {traceback.format_exc()}\"\n )",
"def app_tasks(name, path):\n @task(pre=reset_project.pre, name=\"reset_project\")\n def _reset_project(ctx):\n reset_project(ctx, path)\n\n _reset_project.__doc__ = \"Reset Mynewt project files for {}\".format(name)\n\n @task(pre=install_project.pre, name=\"install_project\")\n def _install_project(ctx):\n install_project(ctx, path)\n\n _install_project.__doc__ = \"Install Mynewt project dependencies for {}\".format(name)\n\n @task(pre=build.pre, name=\"build\")\n def _build(ctx, export_path=None, board=None):\n build(ctx, name, path, export_path, board)\n\n _build.__doc__ = \"Build {} for Pylon\".format(name)\n\n @task(pre=run.pre, name=\"run\")\n def _run(ctx, sn=None, board=None): # pylint: disable=C0103\n run(ctx, name, path, sn, board)\n\n _run.__doc__ = \"Flash and run {} on Pylon\".format(name)\n\n @task(pre=debug.pre, name=\"debug\")\n def _debug(ctx, sn=None, port=None, board=None): # pylint: disable=C0103\n debug(ctx, name, path, sn, port, board)\n\n _debug.__doc__ = \"Debug {} on Pylon\".format(name)\n\n return _install_project, _reset_project, _build, _run, _debug",
"def default_tasks():\n tasks = {'run': run, 'bash': bash}\n for entry_point in pkg_resources.iter_entry_points('jarbas_task'):\n tasks[entry_point.name] = entry_point.load()\n return tasks",
"def test_relative_paths(self):\n command_line = self._MENU + [\n \"some_pool\",\n \"../dev\",\n \"./fake\",\n \"/abc\",\n ]\n TEST_RUNNER(command_line)",
"def modpath():\n return 'loadlimit.task'",
"def _load_defined_tasks():\n task_path = Path(__file__).parent.resolve() / \"nalu_tasks\"\n py_files = glob.glob(str(task_path / \"[a-z]*.py\"))\n modset = {Path(ff).stem for ff in py_files}\n for pymod in modset:\n importlib.import_module(\".%s\"%pymod, 'exawind.nalu.nalu_tasks')",
"def task(ctx, config):\n pass",
"def generate_tasks(self, task):",
"def test_findtasks_found(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeModuleWithTasks)\n\n taskfile = 'a_0.py'\n\n sys.meta_path.append(TaskImporter(taskfile))\n taskmod = import_module(modpath)\n\n assert len(taskmod.__tasks__) == 1\n task = taskmod.__tasks__[0]\n assert task.__name__ == 'TestTask'\n assert isinstance(task, type)\n assert issubclass(task, TaskABC)",
"def GetTaskOutputRelativeDir(cls, task):\n task = os.path.dirname(cls.TaskRelativeName(task))\n if not task: return ''\n\n parts = task.split(os.sep)\n res_parts = []\n for part in parts:\n priority_name = part.split('_', 1)\n res_parts += [priority_name[1]]\n return os.sep.join(res_parts)",
"def test_taskfile_import(monkeypatch, modpath):\n monkeypatch.setattr(loadlimit.importhook, 'lstaskfiles', fake_lstaskfiles)\n monkeypatch.setattr(loadlimit.importhook, 'SourceFileLoader',\n FakeSourceFileLoader)\n\n taskfiles = ['a_{}.py'.format(i) for i in range(10)]\n names = [splitext(n)[0] for n in taskfiles]\n pypath = ['{}.{}'.format(modpath, n) for n in names]\n randpath = choice(pypath)\n\n assert modpath not in sys.modules\n assert all(not p.startswith(modpath) for p in sys.modules)\n\n sys.meta_path.append(TaskImporter(*taskfiles))\n taskfile = import_module(randpath)\n\n expected = set(pypath) | set([modpath])\n result = set(p for p in sys.modules if p.startswith(modpath))\n\n assert modpath in sys.modules\n assert result == expected\n assert taskfile.TEST == randpath",
"def set_executable_options(self, task):\n pass",
"def addTask(self, task):\n if isinstance(task, ShREEKTask):\n self._ShREEKConfig.addTask(task)\n return \n if type(task) == type(\"string\"):\n dirname = os.path.dirname(task)\n exename = os.path.basename(task)\n taskObject = ShREEKTask(Directory = dirname,\n Executable = exename)\n \n self._ShREEKConfig.addTask(taskObject)\n return \n \n msg = \"Unknown Task type added to ShREEKInterface\\n\"\n msg += \"\\t%s\\n\" % task\n msg += \"Argument must be a ShREEKTask Object or a path to\\n\"\n msg += \"an executable script\\n\"\n raise ShREEKException(msg, ClassInstance = self,\n BadObject = task)",
"def ns_foreach_task_subdir(c):\n from slugify import slugify\n from metapack_build.tasks.package import make_ns\n\n for d in _build_order(c):\n print(\"⏩ \", d)\n incl_path = d.joinpath('tasks.py')\n\n if not incl_path.exists():\n continue\n\n module_name = f'tasks.{slugify(d.name)}'\n\n make_ns() # Reset the package namespace\n\n spec = importlib.util.spec_from_file_location(module_name, incl_path)\n sp_tasks = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(sp_tasks)\n\n curdir = os.getcwd()\n\n os.chdir(d)\n\n try:\n yield sp_tasks.ns\n except AttributeError as e:\n if module_name not in str(e):\n raise\n finally:\n os.chdir(curdir)",
"def TaskDirName(cls, task):\n if not task: return None\n return os.path.dirname(task)",
"def task_4_2_1():\n # TODO Task 4.2.1: Your code goes here\n pass",
"def get_pytest():\n return path.join(TaskCreator.bin_dir, \"py.test\")",
"def test_py_task_config(exopy_qtbot, task_workbench):\n plugin = task_workbench.get_plugin('exopy.tasks')\n\n root = RootTask()\n config = PyTaskConfig(manager=plugin,\n task_class=plugin.get_task('exopy.ComplexTask'),\n future_parent=root)\n\n assert config.task_name\n assert config.ready\n assert config.task_doc\n\n config.task_name = ''\n assert not config.ready\n\n config.task_name = 'Test'\n assert config.ready\n task = config.build_task()\n assert task.name == 'Test'\n\n root.add_child_task(0, task)\n config2 = PyTaskConfig(manager=plugin,\n task_class=plugin.get_task('exopy.ComplexTask'),\n future_parent=root)\n\n config2.task_name = 'Test'\n assert not config2.ready\n\n config2.task_name = 'ADifferentName'\n assert config2.ready\n\n plugin.auto_task_names = []\n config = PyTaskConfig(manager=plugin,\n task_class=plugin.get_task('exopy.ComplexTask'),\n future_parent=root)\n\n assert not config.task_name\n assert not config.ready\n\n show_and_close_widget(exopy_qtbot, PyConfigView(config=config))\n show_and_close_widget(exopy_qtbot, PyConfigView(config=config, loop=True))",
"def task(*args, **kwargs):\n print(f\"task declared, args: {args}, kwargs:{kwargs}\")\n return FalseCeleryApp",
"def task_4_3_2():\n # TODO Task 4.3.2: Your code goes here\n pass"
] | [
"0.6477157",
"0.57612556",
"0.5689633",
"0.56765836",
"0.55889344",
"0.55805033",
"0.55210114",
"0.5507863",
"0.5440326",
"0.5436243",
"0.54087734",
"0.5403501",
"0.5345792",
"0.5335856",
"0.53229564",
"0.5279437",
"0.5270501",
"0.5263024",
"0.5223266",
"0.52160585",
"0.5170007",
"0.5158954",
"0.5104897",
"0.509643",
"0.50870895",
"0.50826484",
"0.5057847",
"0.5048503",
"0.5047269",
"0.5045533"
] | 0.71928936 | 0 |
If USE_PATH is True rely on PATH to look for binaries. Otherwise ../src/ is used by default. | def binary_location(cmd, USE_PATH=False):
if USE_PATH:
return cmd
else:
return os.path.join(BIN_PREFIX, cmd) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def linkpath(srcdir, pkg):\n home = os.getenv('HOME')\n if srcdir:\n rval = '{}/{}'.format(srcdir, pkg)\n else:\n rval = '{}/bin/{}'.format(home, pkg)\n return rval",
"def binary_location(cmd, USE_PATH=False):\n return os.path.join(BIN_PREFIX, cmd)",
"def set_path():\n import os\n import sys\n\n sys.path.insert(0, os.path.join(os.path.dirname(__file__), \"..\"))",
"def set_proto_src(path):\n if sys.path.count(path) == 0:\n sys.path.append(path)",
"def load_libsrc():\n import sys\n ops_dir = os.path.dirname(os.path.realpath(__file__))\n fst_package = ops_dir + '/../lib_src/fst_pipeline'\n sys.path.append(fst_package)\n return",
"def test_relative_paths(self):\n command_line = self._MENU + [\n \"some_pool\",\n \"../dev\",\n \"./fake\",\n \"/abc\",\n ]\n TEST_RUNNER(command_line)",
"def path_which(args):\n print(header(\"$PATH Lookup: {}\".format(args.look)))\n loop_fmt = \"{color}{path}\"\n\n cnt = 0\n for part in os.environ[\"PATH\"].split(\":\"):\n color = u\"\"\n if args.color:\n color = CODES[cnt]\n\n msg = check_exec(part, args.look, args.version)\n if msg:\n print(header(loop_fmt.format(color=color, path=part), '-'))\n print(msg)\n cnt = (cnt + 1) % len(CODES)",
"def get_kernel_path():\n path = \"/\".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n return path+'/src/'",
"def main():\n arg0 = sys.argv[0]\n if not os.path.isfile(arg0):\n sys.exit(\"sys.argv[0] is not a path to a file: \\\"\" + str(arg0) + \"\\\". Exiting now.\")\n absolute_path_to_file = os.path.realpath(arg0) # realpath follows symlinks, which is what we want in this case.\n absolute_path_to_src = os.path.dirname(absolute_path_to_file)\n (absolute_path_to_repo, src_dirname) = os.path.split(absolute_path_to_src)\n if src_dirname != \"src\":\n sys.exit(\"The driver script should be located in directory \\\"src\\\". It is instead in \\\"\" + src_dirname + \"\\\". Exiting now.\")\n os.chdir(absolute_path_to_repo)",
"def SearchPath(name, path=None):\n path = path or os.environ['PATH']\n for dir in path.split(os.pathsep):\n binpath = os.path.join(dir, name)\n if os.path.exists(binpath):\n return os.path.abspath(binpath)\n return None",
"def srcdir(path):\n if not workflow.included_stack:\n return None\n return workflow.current_basedir.join(path).get_path_or_uri()",
"def test_add_srcdirs_to_syspath(root_path: Path) -> None:\n add_srcdirs_to_syspath()\n\n # Test to see if runtime_syspath's 'src' directory in now in sys.path\n src_path: Path = root_path / \"src\"\n src_path_str: str = os.fspath(src_path)\n sys_paths: List[str] = list()\n found_src_path: bool = False\n syspath_member: str\n for syspath_member in sys.path:\n sys_paths.append(syspath_member)\n if src_path_str == syspath_member:\n found_src_path = True\n break\n\n if not found_src_path:\n msg: str = f\"{src_path.as_posix()} is not in:\"\n syspath_mem: str\n for syspath_mem in sorted(sys_paths):\n msg += f\"\\n\\t{Path(syspath_mem).as_posix()}\"\n pytest.fail(msg)",
"def syspath():\n import sys\n pprint(sys.path)",
"def activateLocalFastPath() -> None:\n global _FAST_PATH, _FAST_PATH_IS_TEMPORARY, APP_DATA\n\n # Try to fix pathing issues in Windows.\n if os.name == \"nt\":\n APP_DATA = APP_DATA.replace(\"/\", \"\\\\\")\n\n _FAST_PATH = os.path.join(\n APP_DATA,\n \"{}{}-{}\".format(\n MPI_RANK,\n os.environ.get(\"PYTEST_XDIST_WORKER\", \"\"), # for parallel unit testing,\n datetime.datetime.now().strftime(\"%Y%m%d%H%M%S%f\"),\n ),\n )\n\n _FAST_PATH_IS_TEMPORARY = True",
"def getPythonPath():\n python_path = os.environ.get(\"PYTHONPATH\",\"\")\n \n if os.path.basename(os.path.abspath(os.curdir)) == \"Test\":\n new_python_path = os.path.pathsep.join([\n python_path,os.path.normpath(\"../Lib/external/SQLObject-compat\"),\n os.path.normpath(\"../Lib/external\"),\n os.path.normpath(\"../Lib\"),\n ])\n else:\n new_python_path = os.path.pathsep.join([\n python_path,os.path.normpath(\"./Lib/external/SQLObject-compat\"),\n os.path.normpath(\"./Lib/external\"),\n os.path.normpath(\"./Lib\"),\n ])\n \n return new_python_path",
"def add_path(package):\n\n path_file_name = '../{0}/test/path.txt'.format(package)\n\n if os.path.exists(path_file_name):\n with open(path_file_name, 'r') as path_file:\n for directory in path_file.readlines():\n sys.path.insert(0, os.path.abspath(\n '../{0}/{1}'.format(package, directory.strip('\\n'))\n ))",
"def _whicha(cmd, paths=None):\n import os\n if paths is None:\n paths = os.environ['PATH'].split(':')\n possibilities = [os.path.expanduser(os.path.join(p, cmd)) for p in paths]\n return filter(lambda bin: os.path.exists(bin), possibilities)",
"def thepath = getProgramPath(theprog):\r\n\r\n theprog = lower(theprog);\r\n\r\n if strcmp(theprog,'POV-Ray')\r\n # install location for POV-Ray\r\n thepath = '/usr/local/bin';\r\n\r\n else if strcmp(theprog,'quietpov')\r\n # install location for the QuietPOV add-on\r\n thepath = 'C:\\Program Files\\POV-Ray for Windows v3.6\\guiext\\QuietPOV';\r\n\r\n else if strcmp(theprog,'imagemagick')\r\n # install location for ImageMagick\r\n thepath = '/home/kieran/Downloads/ImageMagick-6.8.5-8';\r\n\r\n else if strcmp(theprog,'ffmpeg')\r\n # install location for the ffmpeg library\r\n thepath = '/usr/bin/ffmpeg';\r\n\r\n else\r\n thepath = '';",
"def FindBinary( binary, user_options ):\n\n def _FindPath():\n key = '{0}_binary_path'.format( binary )\n if user_options.get( key ):\n return user_options[ key ]\n return GO_BINARIES.get( binary )\n\n binary_path = _FindPath()\n if os.path.isfile( binary_path ):\n return binary_path\n return None",
"def where(self, exe, path=None):\n if exe is None:\n return None\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n extlist = ['']\n\n def is_executable(path):\n return os.path.isfile(path) and os.access(path, os.X_OK)\n\n if sys.platform == 'win32':\n pathext = os.environ['PATHEXT'].lower().split(os.pathsep)\n (base, ext) = os.path.splitext(exe)\n if ext.lower() not in pathext:\n extlist = pathext\n for ext in extlist:\n exe_name = exe + ext\n for p in paths:\n exe_path = os.path.join(p, exe_name)\n if is_executable(exe_path):\n return exe_path\n\n return None",
"def test_remote_sys_path(pytester: pytest.Pytester) -> None:\n pytester.makepyfile(\n \"\"\"\n import sys\n\n def test_sys_path():\n assert \"\" not in sys.path\n \"\"\"\n )\n result = pytester.runpytest(\"-n1\")\n assert result.ret == 0",
"def shared_binary_location(cmd=\"shared\"):\n return os.path.join(BIN_PREFIX, cmd)\n return binary_location(cmd, SHARED_USE_PATH)",
"def module_path() -> Path:\n if hasattr(sys, \"frozen\"):\n return Path(sys.executable).resolve().parent\n else:\n return (Path(__file__) / \"..\").resolve().parent",
"def init_env_path(path=None) -> None:\n if path is None:\n sys.path.insert(1, file_dir_dir())\n else:\n sys.path.insert(1, path)",
"def insert_package_path():\n sys.path.insert(0, ospdn(ospdn(ospdn(ospap(__file__)))))",
"def check_module_path(pkg):\n src_dir_root = ''\n print(\"[root-get] DEBUG: Checking module path\")\n check_module_name = os.system('find %s -mindepth 2 -type d -name \"%s\" ! -path \"*tutorials*\" ! -path \"*dictpch*\"' % (ROOT_SOURCES, pkg))\n if check_module_name != 0:\n print(\"Not a ROOT package (we are working only with ROOT packages for now.)\")\n return False\n else:\n # if have such directory in root then we can try to get it's real path\n path = PathChecker()\n src_dir_root = path.path4module(pkg, ROOT_SOURCES)\n if src_dir_root != None:\n print(\"[root-get] We would use a module from {0:s}\".format(src_dir_root))\n else:\n print(\"Package not present in rootbase.\")\n print(\"Please provide manifest file path, else enter 'NA'\")\n p_manifest = raw_input()\n if p_manifest != 'NA':\n value = yaml_validator(p_manifest)\n if value == 1:\n print(\"Not a valid yml. Please provide valid yml. Exiting now.\")\n else:\n print(\"Downloading package using url.\")\n dn_path = downloader(p_manifest)\n #get path for downloaded directory\n filepath = Path(dn_path + \"/CMakeLists.txt\")\n if filepath.is_file():\n src_dir_root = dn_path\n else:\n print(\"No CMakeLists.txt present. Creating using manifest.\")\n rule_name = re.compile(\".*name:.*\")\n with open(p_manifest) as mn:\n read = mn.read()\n name = rule_name.findall(read)\n parc_name = [x.lstrip(' name: ') for x in name]\n cml = open(dn_path + \"/CMakeLists.txt\", 'a')\n cml.write(\"ROOT_STANDARD_LIBRARY_PACKAGE(\" + parc_name[0] + \" DEPENDENCIES RIO)\")\n src_dir_root = dn_path\n\n else:\n print(\"Can you provide package path..(if available)\")\n dir_path = raw_input()\n filepath = Path(dir_path + \"/CMakeLists.txt\")\n if filepath.is_file():\n src_dir_root = dir_path\n else:\n print(\"No CMakeLists.txt present. Creating using manifest.\")\n rule_name = re.compile(\".*name:.*\")\n with open(p_manifest) as mn:\n read = mn.read()\n name = rule_name.findall(read)\n parc_name = [x.lstrip(' name: ') for x in name]\n cml = open(dn_path + \"/CMakeLists.txt\", 'a')\n cml.write(\"ROOT_STANDARD_LIBRARY_PACKAGE(\" + parc_name[0] + \" DEPENDENCIES RIO)\")\n src_dir_root = dn_path\n\n print(\"[root-get] We would use a module from {0:s}\".format(src_dir_root))\n return src_dir_root",
"def path(src, name='default'):\n try:\n return get_output(['hg', 'path', name], cwd=src).strip()\n except subprocess.CalledProcessError:\n return None",
"def find_executable(binary):\n\n\tfor syspath in os.environ.get('PATH', default_path).split(':'):\n\t\tif os.path.exists(os.path.join(syspath, binary)):\n\t\t\treturn os.path.join(syspath, binary)\n\n\treturn None",
"def main():\n if getattr(sys, 'frozen', False):\n folderCurrent = os.path.dirname(sys.executable)\n else:\n folderCurrent = os.path.abspath(os.path.dirname(__file__))\n\n replaceAll(folderCurrent)",
"def get_golem_path():\r\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"../\"))"
] | [
"0.64583826",
"0.6119704",
"0.60500836",
"0.5732794",
"0.5658576",
"0.56554246",
"0.5512849",
"0.5505444",
"0.54935056",
"0.5483672",
"0.5481498",
"0.5455168",
"0.5439266",
"0.5433778",
"0.54287785",
"0.5424196",
"0.5423426",
"0.5394256",
"0.53781176",
"0.5329684",
"0.5325822",
"0.52742755",
"0.52715653",
"0.52624583",
"0.5246451",
"0.5242646",
"0.52409595",
"0.52304184",
"0.5229665",
"0.5225051"
] | 0.6302872 | 1 |
Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result of os.environ.get("PATH"), or can be overridden with a custom search path. | def which(cmd, mode=os.F_OK | os.X_OK, path=None):
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode) and
not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly
# rather than referring to PATH directories. This includes checking
# relative to the current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if sys.platform == "win32":
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path
# extensions. This will allow us to short circuit when given
# "python.exe". If it does match, only test that one, otherwise we
# have to try others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode) and\n not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly\n # rather than referring to PATH directories. This includes checking\n # relative to the current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if normdir not in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None",
"def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n\n def _access_check(fn, mode):\n return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)\n\n # If we're given a path with a directory part, look it up directly\n # rather than referring to PATH directories. This includes checking\n # relative to the current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if os.curdir not in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path\n # extensions. This will allow us to short circuit when given\n # \"python.exe\". If it does match, only test that one, otherwise we\n # have to try others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext.lower() for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if normdir not in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n\n return None",
"def Which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode)\n and not os.path.isdir(fn))\n\n # Short circuit. If we're given a full path which matches the mode\n # and it exists, we're done here.\n if _access_check(cmd, mode):\n return cmd\n\n path = (path or os.environ.get(\"PATH\", os.defpath)).split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if not os.curdir in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path extensions.\n # This will allow us to short circuit when given \"python.exe\".\n matches = [cmd for ext in pathext if cmd.lower().endswith(ext.lower())]\n # If it does match, only test that one, otherwise we have to try\n # others.\n files = [cmd] if matches else [cmd + ext.lower() for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for pathcomp in path:\n pathcomp = os.path.normcase(pathcomp)\n if not pathcomp in seen:\n seen.add(pathcomp)\n for thefile in files:\n name = os.path.join(pathcomp, thefile)\n if _access_check(name, mode):\n return name\n return None",
"def _shutil_which(cmd, mode=os.F_OK | os.X_OK, path=None):\n # Check that a given file can be accessed with the correct mode.\n # Additionally check that `file` is not a directory, as on Windows\n # directories pass the os.access check.\n def _access_check(fn, mode):\n return (os.path.exists(fn) and os.access(fn, mode)\n and not os.path.isdir(fn))\n\n # If we're given a path with a directory part, look it up directly rather\n # than referring to PATH directories. This includes checking relative to the\n # current directory, e.g. ./script\n if os.path.dirname(cmd):\n if _access_check(cmd, mode):\n return cmd\n return None\n\n if path is None:\n path = os.environ.get(\"PATH\", os.defpath)\n if not path:\n return None\n path = path.split(os.pathsep)\n\n if sys.platform == \"win32\":\n # The current directory takes precedence on Windows.\n if not os.curdir in path:\n path.insert(0, os.curdir)\n\n # PATHEXT is necessary to check on Windows.\n pathext = os.environ.get(\"PATHEXT\", \"\").split(os.pathsep)\n # See if the given file matches any of the expected path extensions.\n # This will allow us to short circuit when given \"python.exe\".\n # If it does match, only test that one, otherwise we have to try\n # others.\n if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n files = [cmd]\n else:\n files = [cmd + ext for ext in pathext]\n else:\n # On other platforms you don't have things like PATHEXT to tell you\n # what file suffixes are executable, so just pass on cmd as-is.\n files = [cmd]\n\n seen = set()\n for dir in path:\n normdir = os.path.normcase(dir)\n if not normdir in seen:\n seen.add(normdir)\n for thefile in files:\n name = os.path.join(dir, thefile)\n if _access_check(name, mode):\n return name\n return None",
"def which(cmd):\n for path in os.environ['PATH'].split(os.pathsep):\n path = path.strip('\"')\n cmd_path = os.path.join(path, cmd)\n if os.path.isfile(cmd_path) and os.access(cmd_path, os.X_OK):\n return cmd_path\n\n return None",
"def get_command_path(command):\n def excutable(command_path):\n return os.path.isfile(command_path) and os.access(command_path, os.X_OK)\n\n for path in os.environ[\"PATH\"].split(os.pathsep):\n command_path = os.path.join(path, command)\n if excutable(command_path):\n return command_path\n\n return None",
"def which(cls, cmd):\n abs_path_cmd = None\n if sys.version_info >= (3, 3):\n abs_path_cmd = shutil.which(cmd)\n else:\n abs_path_cmd = find_executable(cmd)\n return abs_path_cmd",
"def Which(binary, path=None):\n if path is None:\n path = os.environ.get('PATH', '')\n for p in path.split(':'):\n p = os.path.join(p, binary)\n if os.access(p, os.X_OK):\n return p\n return None",
"def which(cmd, path=None):\n if path is None:\n path = os.environ[\"PATH\"].split(os.pathsep)\n\n for prefix in path:\n filename = os.path.join(prefix, cmd)\n executable = os.access(filename, os.X_OK)\n is_not_directory = os.path.isfile(filename)\n if executable and is_not_directory:\n return True\n\n return False",
"def _FindExecutableOnPath(executable, path, pathext):\n\n if isinstance(pathext, six.string_types):\n raise ValueError(\n \"_FindExecutableOnPath(..., pathext='{0}') failed \"\n \"because pathext must be an iterable of strings, but got \"\n \"a string.\".format(pathext)\n )\n\n # Prioritize preferred extension over earlier in path.\n for ext in pathext:\n for directory in path.split(os.pathsep):\n # Windows can have paths quoted.\n directory = directory.strip('\"')\n full = os.path.normpath(os.path.join(directory, executable) + ext)\n # On Windows os.access(full, os.X_OK) is always True.\n if os.path.isfile(full) and os.access(full, os.X_OK):\n return full\n return None",
"def find_on_path(command):\n\n if 'PATH' not in os.environ:\n return False\n\n path = os.environ['PATH']\n for element in path.split(os.pathsep):\n if not element:\n continue\n filename = os.path.join(element, command)\n if os.path.isfile(filename) and os.access(filename, os.X_OK):\n return True\n\n return False",
"def cmdGetPath(self, cmd, die=True):\n rc, out, err = self.prefab.core.run(\"which %s\" % cmd, die=False, showout=False, profile=True)\n if rc > 0:\n if die:\n raise j.exceptions.RuntimeError(\"Did not find command: %s\" % cmd)\n else:\n return False\n return out.split(\"\\n\")[-1]",
"def _find_extractor_by_cmd(extractor_cmd):\n if not extractor_cmd:\n return None\n if Path(extractor_cmd).is_file():\n return extractor_cmd\n return shutil.which(extractor_cmd)",
"def which(file, env=os.environ):\n if file is None:\n return None\n for path in env.get('PATH', '').split(os.pathsep):\n if path:\n result = os.path.join(path, file)\n if os.path.exists(result):\n return os.path.realpath(result)\n return None",
"def find_executable(cls, name, cmd, dry_run=False):\n if cls.PATH is None:\n cls.PATH = os.environ[\"PATH\"].split(\":\")\n for pdir in cls.PATH:\n pcmd = os.path.join(pdir, cmd)\n if os.path.exists(pcmd):\n return pcmd\n if dry_run:\n return cmd\n raise SystemExit(\"%s '%s' does not exist\" % (name, cmd))",
"def FindExecutableOnPath(executable, path=None, pathext=None, allow_extensions=False):\n\n if not allow_extensions and os.path.splitext(executable)[1]:\n raise ValueError(\n \"FindExecutableOnPath({0},...) failed because first \"\n \"argument must not have an extension.\".format(executable)\n )\n\n if os.path.dirname(executable):\n raise ValueError(\n \"FindExecutableOnPath({0},...) failed because first \"\n \"argument must not have a path.\".format(executable)\n )\n\n if path is None:\n effective_path = _GetSystemPath()\n else:\n effective_path = path\n effective_pathext = (\n pathext\n if pathext is not None\n else _PlatformExecutableExtensions(platforms.OperatingSystem.Current())\n )\n\n return _FindExecutableOnPath(executable, effective_path, effective_pathext)",
"def which(executable):\n def is_executable(path):\n \"\"\"True if path exists and is executable.\"\"\"\n return (os.path.exists(path) and\n not os.path.isdir(path) and\n os.access(path, os.F_OK | os.X_OK))\n\n def normalize(path):\n \"\"\"Return canonical case-normalized path.\"\"\"\n return os.path.normcase(os.path.realpath(path))\n\n def path_list():\n \"\"\"Get executable path list.\"\"\"\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)\n\n def pathext_list():\n \"\"\"Get list of extensions to automatically search.\"\"\"\n return (os.environ.get(\"PATHEXT\") or \"\").split(os.pathsep)\n\n seen = set()\n\n for path in [normalize(p) for p in path_list()]:\n if path not in seen:\n for ext in [\"\"] + pathext_list():\n full_path = os.path.join(path, executable) + ext\n if is_executable(full_path):\n return full_path\n\n seen.add(path)\n\n return None",
"def ensure_file(path, mode):\n assert isinstance(path, Path)\n parent = path.parent()\n assert parent != path, \"Path and parent were the same!\"\n ensure_dir(parent)\n fd = path.open(mode)\n return fd",
"def find_binary_in_path(filename: str) -> str:\n if \"PATH\" not in os.environ:\n raise PATHNotFoundError\n for directory in os.environ[\"PATH\"].split(os.pathsep):\n binary = os.path.abspath(os.path.join(directory, filename))\n if os.path.isfile(binary) and os.access(binary, os.X_OK):\n return binary\n raise BinaryNotFoundError",
"def which_bin(cmd):\n cmd = [\"which\", cmd]\n try:\n return stderr_output(cmd).strip().split('\\n')[0]\n except CryptoritoError:\n return None",
"def _get_path(self, prompt):\n\n # When input from vim, vim escapes some special characters,\n # so we have to expand them first.\n cwd = vim.eval('expand(getcwd())')\n path = vim.eval('expand(input(\"%s\", \"\", \"file\"))' % prompt)\n if path == None or path == \"\":\n return None\n else:\n return os.path.join(cwd, os.path.expanduser(path))",
"def find_executable(executable, path=None):\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n base, ext = os.path.splitext(executable)\n\n if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):\n executable = executable + '.exe'\n\n if not os.path.isfile(executable):\n for p in paths:\n f = os.path.join(p, executable)\n if os.path.isfile(f):\n # the file exists, we have a shot at spawn working\n return f\n return None\n else:\n return executable",
"def find_executable(executable, path=None):\n import os, os.path, sys\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n extlist = ['']\n if os.name == 'os2':\n (base, ext) = os.path.splitext(executable)\n # executable files on OS/2 can have an arbitrary extension, but\n # .exe is automatically appended if no dot is present in the name\n if not ext:\n executable = executable + \".exe\"\n elif sys.platform == 'win32':\n pathext = os.environ['PATHEXT'].lower().split(os.pathsep)\n (base, ext) = os.path.splitext(executable)\n if ext.lower() not in pathext:\n extlist = pathext\n for ext in extlist:\n execname = executable + ext\n if os.path.isfile(execname):\n return execname\n else:\n for p in paths:\n f = os.path.join(p, execname)\n if os.path.isfile(f):\n return f\n else:\n return None",
"def which(module, mode, exename):\n return _which(exename)",
"def find_in_PATH(filename: str):\n if path.isfile(filename):\n return path.normpath(filename)\n\n os_paths = os.environ['PATH'].split(path.pathsep)\n for os_path in os_paths:\n fullpath_file = path.join(os_path, filename)\n if path.isfile(fullpath_file):\n return path.normpath(fullpath_file)\n raise FileNotFoundError(f'could not find {filename}')",
"def _whicha(cmd, paths=None):\n import os\n if paths is None:\n paths = os.environ['PATH'].split(':')\n possibilities = [os.path.expanduser(os.path.join(p, cmd)) for p in paths]\n return filter(lambda bin: os.path.exists(bin), possibilities)",
"def with_path(target: pathlib.Path, cmd):\n\n def null_handler(signum, frame):\n pass # pragma: no cover\n\n signal.signal(signal.SIGINT, null_handler)\n return subprocess.Popen(cmd, env=_setup_env(target)).wait()",
"def get_fh(filename, mode):\n fh = None\n try:\n if mode == 'r':\n fh = open(filename,'r')\n elif mode == 'w':\n fh = open(filename,'w')\n else:\n raise ValueError('Command should be r or w')\n except IOError as e:\n print(e)\n except ValueError as e:\n print(e)\n return fh",
"def which(executable):\n if executable.startswith('/'):\n return executable\n\n path = os.environ['PATH'].split(os.pathsep)\n\n for executable_with_ext in _executable_names(executable):\n for entry in path:\n joined = os.path.join(entry, executable_with_ext)\n if os.path.isfile(joined) and os.access(joined, os.X_OK):\n return joined\n\n return None",
"def which(cls, cmd):\n return get_exe_path(cmd + '.exe')"
] | [
"0.77106726",
"0.7453694",
"0.727641",
"0.6943331",
"0.6540709",
"0.639578",
"0.60801244",
"0.59047854",
"0.5880955",
"0.5878199",
"0.57527226",
"0.5732597",
"0.5709009",
"0.5672643",
"0.5576627",
"0.5575957",
"0.55535084",
"0.5497285",
"0.5452454",
"0.5445605",
"0.5425212",
"0.5414241",
"0.5405033",
"0.53908074",
"0.537771",
"0.53698206",
"0.53634244",
"0.53582907",
"0.53363436",
"0.53197527"
] | 0.7460819 | 1 |
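A minimal usage sketch for the `which` helper defined in the document field of the record above. The command names and search path below are illustrative assumptions, not part of the original record, and the sketch assumes the `which` definition above is in scope.

import os

# Resolve a command against the default PATH, as a shell would.
print(which("python3"))  # e.g. "/usr/bin/python3", or None if not installed

# A cmd containing a directory part bypasses the PATH search entirely.
print(which("./build.sh"))  # None unless ./build.sh exists and is executable

# Both the access mode and the search path can be overridden.
print(which("gcc", mode=os.F_OK, path="/usr/local/bin" + os.pathsep + "/usr/bin"))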
Helper function to compute values for the fields debit/credit/amount_currency based on an amount and the currencies given as parameters | def compute_amount_fields(self, amount, src_currency, company_currency, invoice_currency=False):
amount_currency = False
currency_id = False
if src_currency and src_currency != company_currency:
amount_currency = amount
amount = src_currency.with_context(self._context).compute(amount, company_currency)
currency_id = src_currency.id
debit = amount > 0 and amount or 0.0
credit = amount < 0 and -amount or 0.0
if invoice_currency and invoice_currency != company_currency and not amount_currency:
amount_currency = src_currency.with_context(self._context).compute(amount, invoice_currency)
currency_id = invoice_currency.id
return debit, credit, amount_currency, currency_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compute_amount_fields(self, amount, src_currency, company_currency):\n amount_currency = False\n currency_id = False\n date = self.env.context.get('date') or fields.Date.today()\n company = self.env.context.get('company_id')\n company = self.env['res.company'].browse(company) if company else self.env.user.company_id\n if src_currency and src_currency != company_currency:\n amount_currency = amount\n amount = src_currency._convert(amount, company_currency, company, date)\n currency_id = src_currency.id\n debit = amount > 0 and amount or 0.0\n credit = amount < 0 and -amount or 0.0\n return debit, credit, amount_currency, currency_id",
"def getValue(currency=None):",
"def getFactor(currency):",
"def getUserCurrency():",
"def getCurrencies():",
"def getCurrencyFactor(id=None):",
"def getBaseCurrency():",
"def getActiveCurrency():",
"def process_conversion(queries, query, src, dst, val, currencies, wf):\n ####################################################################################################\n # Make the currency case insensitive\n ####################################################################################################\n if src:\n src = src.upper()\n if dst:\n dst = dst.upper()\n\n ####################################################################################################\n # Validate the currencies to check if its a currency or not\n ####################################################################################################\n if not validate_currencies(queries, query, src, dst, currencies, wf):\n return 100\n\n rate = search_rate(src, dst, wf)\n\n if rate == -1:\n wf.add_item('No exchange rate found for the especified currencies...', icon=ICON_ERROR)\n return 1\n\n ####################################################################################################\n # Gets the currency info\n ####################################################################################################\n src_currency_info = currencies[src]\n dst_currency_info = currencies[dst]\n\n cur_src_name = get_currency_name(src_currency_info)\n cur_dst_name = get_currency_name(dst_currency_info)\n\n cur_dst_symbol = str.decode(dst_currency_info['Simbol'], encoding='utf-8')\n flag_file_icon = wf.workflowfile('flags/{}'.format(dst_currency_info['Flag']))\n\n if not val:\n val = 1\n\n converted_rate = Decimal(val) * rate\n\n decimal_places = get_decimal_places_to_use(rate)\n\n fmt_converted_rate = format_result(wf, converted_rate, decimal_places)\n\n # module 1 will result in just the decimal part, if the decimal part is 0, then i'll show only 2 decimal places\n if (rate % Decimal(1)).compare(Decimal('0')) == 0:\n fmt_rate = format_result(wf, rate, 2)\n else:\n fmt_rate = format_result(wf, rate, decimal_places)\n\n title = cur_dst_symbol + ' ' + fmt_converted_rate\n sub_title = u'({}) -> ({}) with rate {} for query: {}'.format(cur_src_name, cur_dst_name, fmt_rate,\n ' '.join(query).upper())\n\n wf.add_item(title, sub_title, valid=True, arg=str(converted_rate), icon=flag_file_icon)\n\n ############################################################################################\n # Checks if an update is available, and add it to the output\n ############################################################################################\n if wf.update_available:\n handle_check_update(wf)\n\n return 0",
"def test_currency_case(self):\n form = copy.deepcopy(self.base_form)\n form[\"mc_currency\"] = \"UsD\"\n Payment.process_paypal_ipn(form)\n payments = Payment.query.all()\n self.assertEqual(payments[0].currency, Currency.US_Dollar.value)",
"def test_get_currency_using_get(self):\n pass",
"def get_currency_values_if_valid(self):\n home_value_exists = False\n foreign_value_exists = False\n if self.root.ids.home_currency_input.text == '':\n self.root.ids.home_currency_input.hint_text = 'Must enter an amount before calibrating'\n else:\n home_value_exists = True\n if self.root.ids.foreign_currency_input.text == '':\n self.root.ids.foreign_currency_input.hint_text = 'Must enter an amount before converting'\n else:\n foreign_value_exists = True\n if foreign_value_exists:\n try:\n foreign_amount = float(self.root.ids.foreign_currency_input.text)\n valid_foreign_amount = True\n except ValueError:\n self.root.ids.foreign_currency_input.text = ''\n self.root.ids.foreign_currency_input.hint_text = 'Invalid amount (not a number)'\n foreign_amount = 0\n valid_foreign_amount = False\n else:\n valid_foreign_amount = False\n foreign_amount = 0\n if home_value_exists:\n try:\n home_amount = float(self.root.ids.home_currency_input.text)\n valid_home_amount = True\n except ValueError:\n self.root.ids.home_currency_input.text = ''\n self.root.ids.home_currency_input.hint_text = 'Invalid amount (not a number)'\n home_amount = 0\n valid_home_amount = False\n else:\n valid_home_amount = False\n home_amount = 0\n\n return home_value_exists is foreign_value_exists is valid_foreign_amount is valid_home_amount is True, \\\n home_amount, foreign_amount",
"def currency(self, currency):\n allowed_values = [\"AED\", \"AFN\", \"ALL\", \"AMD\", \"ANG\", \"AOA\", \"ARS\", \"AUD\", \"AWG\", \"AZN\", \"BAM\", \"BBD\", \"BDT\", \"BGN\", \"BHD\", \"BIF\", \"BMD\", \"BND\", \"BOB\", \"BOV\", \"BRL\", \"BSD\", \"BTN\", \"BWP\", \"BYR\", \"BZD\", \"CAD\", \"CDF\", \"CHE\", \"CHF\", \"CHW\", \"CLF\", \"CLP\", \"CNY\", \"COP\", \"COU\", \"CRC\", \"CUC\", \"CUP\", \"CVE\", \"CZK\", \"DJF\", \"DKK\", \"DOP\", \"DZD\", \"EGP\", \"ERN\", \"ETB\", \"EUR\", \"FJD\", \"FKP\", \"GBP\", \"GEL\", \"GHS\", \"GIP\", \"GMD\", \"GNF\", \"GTQ\", \"GYD\", \"HKD\", \"HNL\", \"HRK\", \"HTG\", \"HUF\", \"IDR\", \"ILS\", \"INR\", \"IQD\", \"IRR\", \"ISK\", \"JMD\", \"JOD\", \"JPY\", \"KES\", \"KGS\", \"KHR\", \"KMF\", \"KPW\", \"KRW\", \"KWD\", \"KYD\", \"KZT\", \"LAK\", \"LBP\", \"LKR\", \"LRD\", \"LSL\", \"LTL\", \"LVL\", \"LYD\", \"MAD\", \"MDL\", \"MGA\", \"MKD\", \"MMK\", \"MNT\", \"MOP\", \"MRO\", \"MRU\", \"MUR\", \"MVR\", \"MWK\", \"MXN\", \"MXV\", \"MYR\", \"MZN\", \"NAD\", \"NGN\", \"NIO\", \"NOK\", \"NPR\", \"NZD\", \"OMR\", \"PAB\", \"PEN\", \"PGK\", \"PHP\", \"PKR\", \"PLN\", \"PYG\", \"QAR\", \"RON\", \"RSD\", \"RUB\", \"RWF\", \"SAR\", \"SBD\", \"SCR\", \"SDG\", \"SEK\", \"SGD\", \"SHP\", \"SLL\", \"SOS\", \"SRD\", \"SSP\", \"STD\", \"STN\", \"SVC\", \"SYP\", \"SZL\", \"THB\", \"TJS\", \"TMT\", \"TND\", \"TOP\", \"TRY\", \"TTD\", \"TWD\", \"TZS\", \"UAH\", \"UGX\", \"USD\", \"USN\", \"USS\", \"UYI\", \"UYU\", \"UZS\", \"VEF\", \"VES\", \"VND\", \"VUV\", \"WST\", \"XAF\", \"XCD\", \"XOF\", \"XPF\", \"YER\", \"ZAR\", \"ZMW\", \"ZWL\"] # noqa: E501\n if currency not in allowed_values:\n raise ValueError(\n \"Invalid value for `currency` ({0}), must be one of {1}\" # noqa: E501\n .format(currency, allowed_values)\n )\n\n self._currency = currency",
"def get_currency():\n return _currency",
"def course(self, currency, sum):\n if currency == \"USD\":\n url = \"https://finance.rambler.ru/currencies/USD/\"\n elif currency == \"EUR\":\n url = \"https://finance.rambler.ru/currencies/EUR/\"\n else:\n return sum * 1000\n site = requests.get(url)\n soup = bs4.BeautifulSoup(site.text, 'html.parser')\n com = float(soup.find(\"div\", attrs={\"class\": \"finance-currency-plate__currency\"}).text.split()[0])\n return com * sum * 1000",
"def bitcoins_to_currency(cls, currency, amount):\n if not (rate := cache.get(currency)):\n try:\n api_rate = cls.api_call(currency)\n decimals = Decimal(\"0.01\")\n total = amount * Decimal(str(api_rate))\n rate = total.quantize(decimals, rounding=ROUND_DOWN).normalize()\n except Exception:\n # Don't retry. Just send empty flag\n rate = cls.API_NOT_AVAILABLE\n if rate and rate != cls.API_NOT_AVAILABLE:\n cache.set(currency, rate)\n return rate",
"def _get_amount_value(\n self, cr, uid, ids, ifrs_line=None, period_info=None,\n fiscalyear=None, exchange_date=None, currency_wizard=None,\n number_month=None, target_move=None, pdx=None, undefined=None,\n two=None, one_per=False, bag=None, context=None):\n\n context = context and dict(context) or {}\n # TODO: Current Company's Currency shall be used: the one on wizard\n from_currency_id = ifrs_line.ifrs_id.company_id.currency_id.id\n to_currency_id = currency_wizard\n\n if number_month:\n if two:\n context = {\n 'period_from': number_month, 'period_to': number_month}\n else:\n period_id = period_info[number_month][1]\n context = {'period_from': period_id, 'period_to': period_id}\n else:\n context = {'whole_fy': True}\n\n # NOTE: This feature is not yet been implemented\n # context['partner_detail'] = pdx\n context['fiscalyear'] = fiscalyear\n context['state'] = target_move\n\n if ifrs_line.type == 'detail':\n res = self._get_sum_detail(\n cr, uid, ifrs_line.id, number_month,\n context=context)\n elif ifrs_line.type == 'total':\n res = self._get_grand_total(\n cr, uid, ifrs_line.id, number_month,\n one_per=one_per, bag=bag, context=context)\n elif ifrs_line.type == 'constant':\n res = self._get_constant(cr, uid, ifrs_line.id, number_month,\n context=context)\n else:\n res = 0.0\n\n if ifrs_line.type == 'detail':\n res = self.exchange(\n cr, uid, ids, res, to_currency_id, from_currency_id,\n exchange_date, context=context)\n return res",
"def getAmount2(*args):",
"def getActiveCurrencies():",
"def validate_payment_amount(\n self,\n value: Text,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> Dict[Text, Any]:\n\n credit_card = tracker.get_slot(\"credit_card\")\n cc_balance = tracker.get_slot(\"credit_card_balance\")\n account_balance = float(tracker.get_slot(\"account_balance\"))\n try:\n entity = get_entity_details(\n tracker, \"amount-of-money\"\n ) or get_entity_details(tracker, \"number\")\n amount_currency = parse_duckling_currency(entity)\n if not amount_currency:\n raise (TypeError)\n if account_balance < float(amount_currency.get(\"amount_of_money\")):\n dispatcher.utter_message(template=\"utter_insufficient_funds\")\n return {\"payment_amount\": None}\n return amount_currency\n except (TypeError, AttributeError):\n pass\n if value and value.lower() in cc_balance.get(credit_card.lower()):\n key = value.lower()\n amount = cc_balance.get(credit_card.lower()).get(key)\n amount_type = f\" (your {key})\"\n\n if account_balance < float(amount):\n dispatcher.utter_message(template=\"utter_insufficient_funds\")\n return {\"payment_amount\": None}\n return {\n \"payment_amount\": f\"{amount:.2f}\",\n \"payment_amount_type\": amount_type,\n \"currency\": \"$\",\n }\n\n else:\n dispatcher.utter_message(template=\"utter_no_payment_amount\")\n return {\"payment_amount\": None}",
"def getDefaultCurrency():",
"def getBalance(self, currency=''):\n\n if self.app.getExchange() == 'binance':\n if self.mode == 'live':\n model = BAuthAPI(self.app.getAPIKey(), self.app.getAPISecret())\n df = model.getAccount()\n if isinstance(df, pd.DataFrame):\n if currency == '':\n # retrieve all balances\n return df\n else:\n # retrieve balance of specified currency\n df_filtered = df[df['currency'] == currency]['available']\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR', 'GBP', 'USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n else:\n return 0.0\n else:\n # return dummy balances\n if currency == '':\n # retrieve all balances\n return self.balance\n else:\n if self.app.getExchange() == 'binance':\n self.balance = self.balance.replace('QUOTE', currency)\n else: \n # replace QUOTE and BASE placeholders\n if currency in ['EUR','GBP','USD']:\n self.balance = self.balance.replace('QUOTE', currency)\n else:\n self.balance = self.balance.replace('BASE', currency)\n\n if self.balance.currency[self.balance.currency.isin([currency])].empty:\n self.balance.loc[len(self.balance)] = [currency, 0, 0, 0]\n\n # retrieve balance of specified currency\n df = self.balance\n df_filtered = df[df['currency'] == currency]['available']\n\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR', 'GBP', 'USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n\n else:\n if self.mode == 'live':\n # if config is provided and live connect to Coinbase Pro account portfolio\n model = CBAuthAPI(self.app.getAPIKey(), self.app.getAPISecret(), self.app.getAPIPassphrase(), self.app.getAPIURL())\n if currency == '':\n # retrieve all balances\n return model.getAccounts()[['currency', 'balance', 'hold', 'available']]\n else:\n df = model.getAccounts()\n # retrieve balance of specified currency\n df_filtered = df[df['currency'] == currency]['available']\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return balance of specified currency (if positive)\n if currency in ['EUR','GBP','USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))\n \n else:\n # return dummy balances\n\n if currency == '':\n # retrieve all balances\n return self.balance\n else:\n # replace QUOTE and BASE placeholders\n if currency in ['EUR','GBP','USD']:\n self.balance = self.balance.replace('QUOTE', currency)\n elif currency in ['BCH','BTC','ETH','LTC','XLM']:\n self.balance = self.balance.replace('BASE', currency)\n\n if self.balance.currency[self.balance.currency.isin([currency])].empty == True:\n self.balance.loc[len(self.balance)] = [currency,0,0,0]\n\n # retrieve balance of specified currency\n df = self.balance\n df_filtered = df[df['currency'] == currency]['available']\n\n if len(df_filtered) == 0:\n # return nil balance if no positive balance was found\n return 0.0\n else:\n # return 
balance of specified currency (if positive)\n if currency in ['EUR','GBP','USD']:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))\n else:\n return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))",
"def convert_amount(self, init, new_currency, amount):\r\n\r\n curr = CurrencyRates()\r\n curr_conversion = curr.convert(init, new_currency, amount)\r\n\r\n return curr_conversion",
"def get_balance(self, currency):\n\n result = self.api_query('getInfo', {'coinName': currency, 'need_new':0})\n\n #{'success': True, 'message': '', 'result': {'Currency': 'NXS', 'Balance': 1.55257461, 'Available': 1.55257461, 'Pending': 0.0, 'CryptoAddress': None}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 2}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255221}}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255362}}\n\n #{'success': False, 'message': 'INVALID_CURRENCY', 'result': None}\n #{'success': 1, 'return': {'rights': {'info': 1, 'trade': 1, 'deposit': 1, 'withdraw': 0}, 'funds': {'btc': 0.00705219, 'lsk': 1}, 'funds_incl_orders': {'btc': 0.00705219, 'lsk': 2}, 'transaction_count': 0, 'open_orders': 0, 'server_time': 1499255600}}\n try:\n result = {'success': True, 'message' :'', 'result':{'Currency': currency, 'Balance': result['return']['funds_incl_orders'][currency], 'Available': result['return']['funds'][currency], 'Pending': 0.0, 'CryptoAddress': None}}\n except:\n result = {'success': False, 'message' :'', 'result':{'Currency': currency, 'Balance': 0.0, 'Available': 0.0, 'Pending': 0.0, 'CryptoAddress': None}}\n return result",
"def getCurrency(self, cucd):\n obj = MSTCUR.get( cucd )\n if obj:\n return (obj.CMCRCUCD, obj.CMCRCUNM)\n return (None, None)",
"def getRoundedValue(currency=None):",
"def exchange(currency_from, currency_to, amount_from):\n x = analysis(currency_from, currency_to, amount_from)\n return(cal(x))",
"def _amount_residual(self, cr, uid, ids, field_names, args, context=None):\n res = {}\n if context is None:\n context = {}\n cur_obj = self.pool.get('res.currency')\n for move_line in self.browse(cr, uid, ids, context=context):\n res[move_line.id] = {\n 'amount_residual': 0.0,\n 'amount_residual_currency': 0.0,\n }\n\n if move_line.reconcile_id:\n continue\n if not move_line.account_id.type in ('payable', 'receivable'):\n #this function does not suport to be used on move lines not related to payable or receivable accounts\n continue\n\n if move_line.currency_id:\n move_line_total = move_line.amount_currency\n sign = move_line.amount_currency < 0 and -1 or 1\n else:\n move_line_total = move_line.debit - move_line.credit\n sign = (move_line.debit - move_line.credit) < 0 and -1 or 1\n line_total_in_company_currency = move_line.debit - move_line.credit\n context_unreconciled = context.copy()\n if move_line.reconcile_partial_id:\n for payment_line in move_line.reconcile_partial_id.line_partial_ids:\n if payment_line.id == move_line.id:\n continue\n if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:\n move_line_total += payment_line.amount_currency\n else:\n if move_line.currency_id:\n context_unreconciled.update({'date': payment_line.date})\n amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)\n move_line_total += amount_in_foreign_currency\n else:\n move_line_total += (payment_line.debit - payment_line.credit)\n line_total_in_company_currency += (payment_line.debit - payment_line.credit)\n\n result = move_line_total\n res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)\n res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency\n return res",
"def getcurrency(self):\n return self.__currency",
"def make_change_dp(amount, denominations):"
] | [
"0.76655096",
"0.6802662",
"0.67237234",
"0.6577972",
"0.65410954",
"0.6217159",
"0.620707",
"0.61840576",
"0.59770834",
"0.59099656",
"0.5873708",
"0.5872332",
"0.58294046",
"0.58154744",
"0.578612",
"0.57646793",
"0.5753153",
"0.57461447",
"0.57427317",
"0.57381535",
"0.5725122",
"0.57149595",
"0.56717056",
"0.56458056",
"0.56367564",
"0.56234604",
"0.55689025",
"0.5567864",
"0.55324084",
"0.5480228"
] | 0.71101564 | 1 |
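The record above splits a signed amount into debit and credit after currency conversion. Below is a standalone sketch of that rule with a plain numeric rate standing in for Odoo's currency model; the function name and the rate argument are illustrative assumptions, not part of the original API.

def split_debit_credit(amount, rate_to_company=1.0):
    # Keep the original amount as amount_currency only when a real conversion
    # happened, mirroring the src_currency != company_currency check above.
    amount_currency = amount if rate_to_company != 1.0 else False
    company_amount = amount * rate_to_company
    # A positive signed amount books as debit, a negative one as credit.
    debit = company_amount if company_amount > 0 else 0.0
    credit = -company_amount if company_amount < 0 else 0.0
    return debit, credit, amount_currency

print(split_debit_credit(100.0, 1.2))  # (120.0, 0.0, 100.0)
print(split_debit_credit(-50.0))       # (0.0, 50.0, False)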
1. Create a bucket with no max_ttl 2. Upload 1000 docs with exp = 100s 3. Set maxTTL on bucket as 60s 4. After 60s, run expiry pager, get item count, must be 1000 5. After 40s, run expiry pager again and get item count, must be 0 6. Now load another set of docs with exp = 100s 7. Run expiry pager after 60s and get item count, must be 0 | def test_set_maxttl_on_existing_bucket(self):
for bucket in self.buckets:
self._load_json(bucket, self.num_items, exp=100)
self._update_bucket_maxTTL(maxttl=60)
self.sleep(60, "waiting before running expiry pager...")
self.expire_pager(self.servers)
self.sleep(20, "waiting for item count to come down...")
for bucket in self.buckets:
items = RestConnection(self.master).get_active_key_count(bucket)
self.log.info("Doc expiry set to = 100s, maxTTL = 60s"
"(set after doc creation), after 60s, item count = {0}".format(items))
if items != self.num_items:
self.fail("FAIL: Items with larger expiry before maxTTL updation deleted!")
self.sleep(40, "waiting before running expiry pager...")
self.expire_pager(self.servers)
self.sleep(20, "waiting for item count to come down...")
for bucket in self.buckets:
items = RestConnection(self.master).get_active_key_count(bucket)
self.log.info("Doc expiry set to = 100s, maxTTL = 60s"
"(set after doc creation), after 100s,"
" item count = {0}".format(items))
if items != 0:
self.fail("FAIL: Items with not greater expiry set before maxTTL "
"updation not deleted after elapsed TTL!")
for bucket in self.buckets:
self._load_json(bucket, self.num_items, exp=100)
self.sleep(60, "waiting before running expiry pager...")
self.expire_pager(self.servers)
self.sleep(20, "waiting for item count to come down...")
for bucket in self.buckets:
items = RestConnection(self.master).get_active_key_count(bucket)
self.log.info("Doc expiry set to = 100s, maxTTL = 60s, after 100s,"
" item count = {0}".format(items))
if items != 0:
self.fail("FAIL: Items with not greater expiry not "
"deleted after elapsed maxTTL!") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_maxttl_lesser_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)+500)\n self.sleep(int(self.maxttl), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) + 500,\n self.maxttl,\n self.maxttl,\n items))\n if items > 0:\n self.fail(\"Bucket maxTTL of {0} is not honored\".format(self.maxttl))\n else:\n self.log.info(\"SUCCESS: Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) + 500,\n self.maxttl,\n self.maxttl,\n items))",
"def test_update_maxttl(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=40)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 40s item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Updated ttl affects docs with larger expiry before updation!\")\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 100s item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with 100s as expiry before maxTTL updation still alive!\")",
"def test_maxttl_with_doc_updates(self):\n rest = RestConnection(self.master)\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=40)\n\n self.sleep(20, \"waiting to update docs with exp=60s...\")\n\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=60)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Docs with updated expiry deleted unexpectedly!\")\n\n self.sleep(20, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = rest.get_active_key_count(bucket)\n self.log.info(\"Items: {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with updated expiry not deleted after new exp has elapsed!\")",
"def test_maxttl_greater_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)-100)\n self.sleep(int(self.maxttl-100), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) - 100,\n self.maxttl-100,\n self.maxttl-100,\n items))\n if items == 0:\n self.log.info(\"SUCCESS: Docs with lesser expiry deleted\")\n else:\n self.fail(\"FAIL: Doc with lesser expiry still present past ttl\")",
"def test_maxttl_setting(self):\n maxttl = int(self.input.param(\"maxttl\", None))\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False,\n query_with_explain = False, query = False)\n self.sleep(20)\n self._verify_bucket_count_with_index_count()\n self.sleep(maxttl, \"waiting for docs to be expired automatically per maxttl rule\")\n self._expiry_pager(self.master)\n self.sleep(60, \"wait for expiry pager to run on all nodes...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Docs in source bucket is {0} after maxttl has elapsed\".format(items))\n if items != 0:\n self.fail(\"Docs in source bucket is not 0 after maxttl has elapsed\")\n self._verify_bucket_count_with_index_count()",
"def test_cli_bucket_maxttl_setting(self):\n self.rest.force_eject_node()\n\n shell = RemoteMachineShellConnection(self.master)\n if self.input.param('enable_ipv6', False):\n self.reset_and_enable_ipv6(self.master)\n set_index_storage_type = \" --index-storage-setting=memopt \"\n options = ' --cluster-port=8091 \\\n --cluster-ramsize=300 \\\n --cluster-index-ramsize=300 \\\n --services=data,index,query %s ' \\\n % set_index_storage_type\n o, e = shell.execute_couchbase_cli(cli_command=\"cluster-init\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Cluster initialized')\n\n self.log.info(\"Add new user after reset node! \")\n self.add_built_in_server_user(node=self.master)\n bucket_type = self.input.param(\"bucket_type\", \"couchbase\")\n options = ' --bucket=default \\\n --bucket-type={0} \\\n --bucket-ramsize=200 \\\n --max-ttl=400 \\\n --wait '.format(bucket_type)\n o, e = shell.execute_couchbase_cli(cli_command=\"bucket-create\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Bucket created')\n\n self.sleep(30, \"Sleep before loading doc using cbdocloader\")\n\n cluster_flag = \"-c\"\n bucket_quota_flag = \"-m\"\n data_set_location_flag = \"-d\"\n shell.execute_command(\n \"{0}cbdocloader -u Administrator -p password \"\n \"{3} {1} -b default {4} 100 {5} {2}travel-sample.zip\"\n .format(self.bin_path, self.master.ip, self.sample_path,\n cluster_flag, bucket_quota_flag,\n data_set_location_flag))\n shell.disconnect()\n\n buckets = RestConnection(self.master).get_buckets()\n for bucket in buckets:\n if bucket.name != \"default\":\n self.fail(\"default bucket did not get created\")\n\n \"\"\" check for load data into travel-sample bucket \"\"\"\n end_time = time.time() + 120\n num_actual = 0\n while time.time() < end_time:\n self.sleep(10)\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) == self.total_items_travel_sample:\n break\n self.assertTrue(int(num_actual) == self.total_items_travel_sample,\n \"Items number expected %s, actual %s\"\n % (self.total_items_travel_sample, num_actual))\n self.log.info(\"Total items %s \" % num_actual)\n self.sleep(400, \"Waiting for docs to expire as per maxttl\")\n self.expire_pager([self.master])\n self.sleep(20, \"Wait for expiry_purger to run\")\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) != 0:\n self.fail(\"Item count is not 0 after maxttl has elapsed\")\n else:\n self.log.info(\"SUCCESS: Item count is 0 after maxttl has elapsed\")",
"def test_max_items(self):\r\n timeline = Timeline(connection=self.c1, bucket=self.bucket, max_items=3)\r\n now = datetime.utcnow()\r\n\r\n timeline.add(self.key, 1, now)\r\n timeline.add(self.key, 2, now)\r\n timeline.add(self.key, 3, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)\r\n\r\n timeline.add(self.key, 4, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)",
"def __init__(self, bucket_size, bucket_fill_rate, current_time=None):\n self.__bucket_contents = bucket_size\n self.__bucket_size = bucket_size\n self.__bucket_fill_rate = bucket_fill_rate\n\n if current_time is None:\n current_time = time.time()\n\n self.__last_bucket_fill_time = current_time",
"def test01StoreExpiration(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 100):\n keys.append(s.Put(i, i))\n\n # This should not raise\n s.Get(keys[-1])\n\n # This should raise though\n self.assertRaises(KeyError, s.Get, keys[0])",
"def get_object_retention(Bucket=None, Key=None, VersionId=None, RequestPayer=None):\n pass",
"def test_many_expired_keys(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n for i in range(20):\n self.storage.set(i, i, moe=self.now + 1)\n self.now += 2\n self.gc.expire_random()\n for i in range(20):\n self.assertRaises(StorageKeyError, self.storage.get, i)",
"def test_maxttl_possible_values(self):\n # default\n rest = RestConnection(self.master)\n default_maxttl = rest.get_bucket_maxTTL()\n if default_maxttl != 0:\n self.fail(\"FAIL: default maxTTL if left unset must be 0 but is {0}\".format(default_maxttl))\n self.log.info(\"Verified: default maxTTL if left unset is {0}\".format(default_maxttl))\n\n # max value\n try:\n self._update_bucket_maxTTL(maxttl=2147483648)\n except Exception as e:\n self.log.info(\"Expected exception : {0}\".format(e))\n try:\n self._update_bucket_maxTTL(maxttl=2147483647)\n except Exception as e:\n self.fail(\"Unable to set maxTTL=2147483647, the max permitted value\")\n else:\n self.log.info(\"Verified: Max value permitted is 2147483647\")\n else:\n self.fail(\"Able to set maxTTL greater than 2147483647\")\n\n # min value\n try:\n self._update_bucket_maxTTL(maxttl=0)\n except Exception as e:\n self.fail(\"Unable to set maxTTL=0, the min permitted value\")\n else:\n self.log.info(\"Verified: Min value permitted is 0\")\n\n # negative value\n try:\n self._update_bucket_maxTTL(maxttl=-60)\n except Exception as e:\n self.log.info(\"Verified: negative values not permitted, exception : {0}\".format(e))\n else:\n self.fail(\"FAIL: Able to set a negative maxTTL\")\n\n # date/string\n try:\n self._update_bucket_maxTTL(maxttl=\"12/23/2016\")\n except Exception as e:\n self.log.info(\"Verified: string not permitted, exception : {0}\".format(e))\n else:\n self.fail(\"FAIL: Able to set a date string maxTTL\")",
"def test_get_ttl(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'}}\n moes = {'1': time.time() + 5, '4': time.time() + 10}\n for key in keys_to_set.keys():\n storage.set(key, keys_to_set[key], moes.get(key))\n # test at moment t\n self.assertEqual(keys_to_set['1'], storage.get('1'), \"Key '1' should still exist.\")\n # test at moment t+6, one key should expire\n self.now += 6\n keys_to_set.pop('1')\n moes.pop('1')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertEqual(keys_to_set['4'], storage.get('4'), \"Key '4' should still exist.\")\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")\n # test at moment t+11\n self.now += 5\n keys_to_set.pop('4')\n moes.pop('4')\n self.assertRaises(StorageKeyError, storage.get, '1')\n self.assertRaises(StorageKeyError, storage.get, '4')\n self.assertEqual(keys_to_set, storage._keys_dict, \"Remaining keys are wrong\")\n self.assertEqual(moes, storage._moe_dict, \"Remaining moes are wrong\")",
"def test_evict_expired(self):\n\n # use an invasive technique so that we don't have to sleep for\n # the item to expire\n\n bc = TimedCache(keep_time=1)\n\n bc[\"test\"] = \"value\"\n bc[\"test2\"] = \"value2\"\n self.assertEqual(len(bc), 2)\n\n # test that expired item i\n bc.cache[\"test\"].timestamp = bc.cache[\"test\"].timestamp - 2\n bc.purge_expired()\n self.assertEqual(len(bc), 1)\n self.assertFalse(\"test\" in bc)\n self.assertTrue(\"test2\" in bc)",
"def __init__(self):\n self.size = 1000\n self.bucket = [None] * self.size",
"def expiry(self):\n return time() + self.ttl * (0.95 + 0.1 * random())",
"def listget(base_url, keys, throttle, generic_rate, max_lookback, tmpdir, repo_configs, error_rate, get_rate):\n tname = threading.current_thread().name\n app.logger.debug(\"Thread:{x} - Initialise List/Get; base_url:{a}, throttle:{b}, generic_rate:{c}, max_lookback:{d}, tmpdir:{g}, error_rate:{h}, get_rate:{i}\".format(x=tname, a=base_url, b=throttle, c=generic_rate, d=max_lookback, g=tmpdir, h=error_rate, i=get_rate))\n\n genopts = [\"generic\", \"specific\"]\n genprobs = [generic_rate, 1 - generic_rate]\n\n getopts = [\"get\", \"leave\"]\n getprobs = [get_rate, 1 - get_rate]\n\n erropts = [\"err\", \"ok\"]\n errprobs = [error_rate, 1 - error_rate]\n\n errtypes = [\"page\", \"page_size\", \"missing_since\", \"malformed_since\"]\n errtypeprobs = [0.25] * 4\n\n while True:\n try:\n api_key = _select_from(keys)\n j = client.JPER(api_key, base_url)\n #print \"API \" + api_key\n\n # determine whether the metadata we're going to send will cause errors\n reqtype = _select_from(genopts, genprobs)\n #print \"Req: \" + reqtype\n\n # use this to determine the repository id for the request\n repository_id = None\n if reqtype == \"specific\":\n config = _select_from(repo_configs)\n repository_id = config.get(\"repository\")\n\n # determine the \"since\" date we're going to use for the request\n lookback = randint(0, max_lookback)\n since = dates.format(dates.before_now(lookback))\n # print \"Since: \" + since\n\n # choose a page size\n page_size = randint(1, 100)\n\n # now decide, after all that, if we're going to send a malformed request\n err = _select_from(erropts, errprobs)\n\n # if we are to make an erroneous request, go ahead and do it\n if err == \"err\":\n # choose a kind of malformed request\n malformed = _select_from(errtypes, errtypeprobs)\n params = {\"page\" : 1, \"pageSize\" : page_size, \"since\" : since}\n if malformed == \"page\":\n params[\"page\"] = \"one\"\n elif malformed == \"page_size\":\n params[\"pageSize\"] = \"twelvty\"\n elif malformed == \"missing_since\":\n del params[\"since\"]\n else:\n params[\"since\"] = \"a week last thursday\"\n\n # make the malformed url with the JPER client, so we know it gets there ok\n url = j._url(\"routed\", id=repository_id, params=params)\n app.logger.debug(\"Thread:{x} - List/Get sending malformed request for Account:{y} Type:{z} Error:{a} URL:{b}\".format(x=tname, y=api_key, z=reqtype, a=malformed, b=url))\n\n # make the request, and check the response\n resp = http.get(url)\n if resp is not None and resp.status_code == 400:\n app.logger.debug(\"Thread:{x} - List/Get received correct 400 response to malformed request\".format(x=tname))\n else:\n if resp is None:\n sc = None\n else:\n sc = resp.status_code\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; did not receive 400 response to malformed request, got {y}; URL:{z}\".format(x=tname, y=sc, z=url))\n\n # continue, so that we don't have to indent the code below any further\n continue\n\n # if we get to here, we're going to go ahead and do a normal request\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} Type:{z} Since:{a}\".format(x=tname, y=api_key, z=reqtype, a=since))\n\n # iterate over the notifications, catching any errors (which would be unexpected)\n try:\n count = 0\n for note in j.iterate_notifications(since, repository_id, page_size):\n app.logger.debug(\"Thread:{x} - List/Get request for Account:{y} listing notifications for Repository:{z} retrieved Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n count += 1\n\n # determine if 
we're going to get the notification by itself (which is technically unnecessary, of course, but who knows what people's workflows will be)\n reget = _select_from(getopts, getprobs)\n if reget == \"get\":\n try:\n n = j.get_notification(note.id)\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} listing notifications for Repository:{z}, successfully retrieved copy of Notification:{a}\".format(x=tname, y=api_key, z=repository_id, a=note.id))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get notification failed for Notification:{y} that should have existed. This needs a fix: '{b}'\".format(x=tname, y=note.id, b=e.message))\n\n # now retrieve all the links in the note\n for link in note.links:\n url = link.get(\"url\")\n app.logger.debug(\"Thread:{x} - Following List/Get for Account:{y} on Repository:{b}, from Notification:{z} requesting copy of Content:{a}\".format(x=tname, y=api_key, z=note.id, a=url, b=repository_id))\n try:\n stream, headers = j.get_content(url)\n except client.JPERAuthException as e:\n # we got a 401 back from the service, that is acceptable, since we may not be authorised to access it\n app.logger.debug((\"Thread:{x} - get content unauthorised (401) for Content:{z} - this can happen, so is not necessarily unexpected\".format(x=tname, z=url)))\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; get content failed for Content:{z} that should have existed. This needs a fix: '{b}'\".format(x=tname, z=url, b=e.message))\n\n app.logger.debug(\"Thread:{x} - List/Get request completed successfully for Account:{y} listing notifications for Repository:{z} Count:{a}\".format(x=tname, y=api_key, z=repository_id, a=count))\n\n except Exception as e:\n app.logger.error(\"Thread:{x} - MAJOR ISSUE; List/Get request for Account:{y} listing notifications for Repository:{z} resulted in exception '{e}'\".format(x=tname, y=api_key, z=repository_id, e=e.message))\n\n # sleep before making the next request\n time.sleep(throttle)\n except Exception as e:\n app.logger.error(\"Thread:{x} - Fatal exception '{y}'\".format(x=tname, y=e.message))",
"def expire(ttl):\n print(\"[+] Staring expiration of old endpoints.\")\n\n try:\n now = arrow.utcnow()\n expiration = now - timedelta(hours=ttl)\n endpoints = database.session_query(Endpoint).filter(\n cast(Endpoint.last_updated, ArrowType) <= expiration\n )\n\n for endpoint in endpoints:\n print(\n \"[!] Expiring endpoint: {name} Last Updated: {last_updated}\".format(\n name=endpoint.name, last_updated=endpoint.last_updated\n )\n )\n database.delete(endpoint)\n metrics.send(\"endpoint_expired\", \"counter\", 1)\n\n print(\"[+] Finished expiration.\")\n except Exception as e:\n sentry.captureException()",
"def create_thumbnails():\n bucket = BASE_BUCKET + ARG.MANIFOLD\n result = S3_CLIENT.list_objects(Bucket=bucket, Prefix=PREFIX + \"/\", Delimiter=\"/\")\n lev1 = result.get('CommonPrefixes')\n for lev1pre in tqdm(lev1, desc=\"Prefixes\"):\n bpre = lev1pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Prefixes\"] += 1\n #result2 = S3_CLIENT.list_objects(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n # Delimiter=\"/\")\n paginator = S3_CLIENT.get_paginator(\"list_objects\")\n pages = paginator.paginate(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n Delimiter=\"/\")\n for page in pages:\n COUNT[\"Pages\"] += 1\n lev2 = page.get('CommonPrefixes')\n for lev2pre in lev2:\n body = lev2pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Body IDs\"] += 1\n if ARG.WRITE:\n invoke_lambda(bucket, body)\n else:\n LOGGER.debug(\"/\".join([bucket, bpre, body]))\n print(COUNT)",
"def put_object_retention(Bucket=None, Key=None, Retention=None, RequestPayer=None, VersionId=None, BypassGovernanceRetention=None, ContentMD5=None):\n pass",
"def limit_for(self, expiration=10, **kwargs):\n key = self._get_key(**kwargs)\n self.redis_conn.set(key, 1)\n self.redis_conn.expire(key, expiration)",
"async def incr(req):\n key, ttl, err = validate_params(req)\n if err is not None:\n return err\n\n counter = incr_with_ttl(key, ttl)\n return web.json_response(data={'status': 'success', 'counter': counter})",
"def __init__(__self__, *,\n bucket: str,\n kind: str,\n retention_interval: str,\n upload_interval: str):\n pulumi.set(__self__, \"bucket\", bucket)\n pulumi.set(__self__, \"kind\", kind)\n pulumi.set(__self__, \"retention_interval\", retention_interval)\n pulumi.set(__self__, \"upload_interval\", upload_interval)",
"def _put_retry(self, s3_bucket, s3_filename, local_filename, max_retries=3, policy=None):\n b = self.conn.get_bucket(s3_bucket)\n retries = 0\n while retries < max_retries:\n try:\n s3_key = b.new_key(s3_filename)\n s3_key.set_contents_from_filename(local_filename, policy=policy)\n except:\n logger.info('File transfer error: ' + s3_filename, exc_info=True)\n retries = retries + 1\n if retries == max_retries:\n raise\n time.sleep(retries)\n else:\n logger.info('Archived %s to %s/%s', local_filename, s3_bucket, s3_filename)\n return os.path.getsize(local_filename)",
"def large_upload_collection(upload_items: List[JSONDict]) -> UploadCollection:\n items = []\n\n item = upload_items[0]\n for i in range(3050):\n copy = item.copy()\n copy[\"guid\"] = copy[\"guid\"].replace(\"post1\", f\"post{i}\")\n items.append(copy)\n\n collection = UploadCollection(items=items)\n return collection",
"def __init__(self):\n self.m = 1000\n self.bucket = [None] * 1000",
"def __init__(self, bucket):\n self.bucket = bucket",
"def set_ttl(self, ttl):",
"def do_rate_limited_ops(\n handle, num_seconds, do_writes, limit, max_rows, min_size, max_size):\n put_request = PutRequest().set_table_name(table_name)\n get_request = GetRequest().set_table_name(table_name)\n #\n # Generate a string of max_size with all \"x\"s in it\n #\n user_data = ''\n if do_writes:\n for x in range(max_size):\n user_data += 'x'\n\n start_time = int(round(time() * 1000))\n end_time = start_time + num_seconds * 1000\n\n print('Running continuous ' + ('writes' if do_writes else 'reads') +\n ' for ' + str(num_seconds) + ' seconds.')\n #\n # Keep track of how many units we used\n #\n units_used = 0\n #\n # With rate limiting enabled, we can find the amount of time our operation\n # was delayed due to rate limiting by getting the value from the result\n # using Result.get_rate_limit_delayed_ms().\n #\n delay_ms = 0\n\n key = dict()\n value = dict()\n while True:\n fld_id = int(random() * max_rows)\n try:\n if do_writes:\n value['id'] = fld_id\n value['sid'] = fld_id\n rec_size = int(random() * (max_size - min_size))\n rec_size += min_size\n value['name'] = user_data[:rec_size]\n put_request.set_value(value)\n put_result = handle.put(put_request)\n units_used += put_result.get_write_units()\n delay_ms += put_result.get_rate_limit_delayed_ms()\n else:\n key['id'] = fld_id\n key['sid'] = fld_id\n get_request.set_key(key)\n get_result = handle.get(get_request)\n units_used += get_result.get_read_units()\n delay_ms += get_result.get_rate_limit_delayed_ms()\n except WriteThrottlingException as wte:\n # We should not get WriteThrottlingException exception\n print('Got unexpected write throttling exception')\n raise wte\n except ReadThrottlingException as rte:\n # We should not get ReadThrottlingException exception\n print('Got unexpected read throttling exception')\n raise rte\n if int(round(time() * 1000)) >= end_time:\n break\n num_seconds = (int(round(time() * 1000)) - start_time) // 1000\n units_used /= num_seconds\n\n if units_used < int(limit * 0.8) or units_used > int(limit * 1.2):\n if do_writes:\n msg = ('Writes: expected around ' + str(limit) + ' WUs, got ' +\n str(units_used))\n else:\n msg = ('Reads: expected around ' + str(limit) + ' RUs, got ' +\n str(units_used))\n raise RuntimeError(msg)\n\n print(('Writes' if do_writes else 'Reads') + ': average usage = ' +\n str(units_used) + ('WUs' if do_writes else 'RUs') +\n ' (expected around ' + str(limit))\n\n print('Total rate limiter delay time = ' + str(delay_ms) + 'ms')",
"def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)"
] | [
"0.74554807",
"0.73381984",
"0.73129797",
"0.7263034",
"0.69875336",
"0.6090414",
"0.56455797",
"0.5475571",
"0.5398008",
"0.53416365",
"0.53413856",
"0.5322587",
"0.53129023",
"0.5301066",
"0.5245236",
"0.522405",
"0.52154046",
"0.5207431",
"0.5185581",
"0.5156831",
"0.51409173",
"0.5093467",
"0.5086045",
"0.5074731",
"0.5055",
"0.50472575",
"0.5002737",
"0.49924052",
"0.49907944",
"0.4980763"
] | 0.7790331 | 0 |
1. Create a bucket with ttl = 60s 2. Upload 1000 docs with exp = 40s 3. After 20s, update docs with exp = 60s 4. After 40s, run the expiry pager and get the item count; it must be 1000 5. After 20s, run the expiry pager again and get the item count; it must be 0 | def test_maxttl_with_doc_updates(self):
rest = RestConnection(self.master)
for bucket in self.buckets:
self._load_json(bucket, self.num_items, exp=40)
self.sleep(20, "waiting to update docs with exp=60s...")
for bucket in self.buckets:
self._load_json(bucket, self.num_items, exp=60)
self.sleep(40, "waiting before running expiry pager...")
self.expire_pager(self.servers)
for bucket in self.buckets:
items = rest.get_active_key_count(bucket)
self.log.info("Items: {0}".format(items))
if items != self.num_items:
self.fail("FAIL: Docs with updated expiry deleted unexpectedly!")
self.sleep(20, "waiting before running expiry pager...")
self.expire_pager(self.servers)
self.sleep(20, "waiting for item count to come down...")
for bucket in self.buckets:
items = rest.get_active_key_count(bucket)
self.log.info("Items: {0}".format(items))
if items != 0:
self.fail("FAIL: Docs with updated expiry not deleted after new exp has elapsed!") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_set_maxttl_on_existing_bucket(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=60)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s\"\n \"(set after doc creation), after 60s, item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Items with larger expiry before maxTTL updation deleted!\")\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s\"\n \"(set after doc creation), after 100s,\"\n \" item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Items with not greater expiry set before maxTTL \"\n \"updation not deleted after elapsed TTL!\")\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL = 60s, after 100s,\"\n \" item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Items with not greater expiry not \"\n \"deleted after elapsed maxTTL!\")",
"def test_update_maxttl(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=100)\n self._update_bucket_maxTTL(maxttl=40)\n\n self.sleep(40, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 40s item count = {0}\".format(items))\n if items != self.num_items:\n self.fail(\"FAIL: Updated ttl affects docs with larger expiry before updation!\")\n\n self.sleep(60, \"waiting before running expiry pager...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = 100s, maxTTL at the time of doc creation = 200s\"\n \" updated maxttl = 40s, after 100s item count = {0}\".format(items))\n if items != 0:\n self.fail(\"FAIL: Docs with 100s as expiry before maxTTL updation still alive!\")",
"def test_maxttl_lesser_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)+500)\n self.sleep(int(self.maxttl), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) + 500,\n self.maxttl,\n self.maxttl,\n items))\n if items > 0:\n self.fail(\"Bucket maxTTL of {0} is not honored\".format(self.maxttl))\n else:\n self.log.info(\"SUCCESS: Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) + 500,\n self.maxttl,\n self.maxttl,\n items))",
"def test_maxttl_greater_doc_expiry(self):\n for bucket in self.buckets:\n self._load_json(bucket, self.num_items, exp=int(self.maxttl)-100)\n self.sleep(int(self.maxttl-100), \"waiting for all docs to expire per maxTTL rule...\")\n self.expire_pager(self.servers)\n self.sleep(20, \"waiting for item count to come down...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Doc expiry set to = {0}s, maxTTL = {1}s, after {2}s, item count = {3}\".format(\n int(self.maxttl) - 100,\n self.maxttl-100,\n self.maxttl-100,\n items))\n if items == 0:\n self.log.info(\"SUCCESS: Docs with lesser expiry deleted\")\n else:\n self.fail(\"FAIL: Doc with lesser expiry still present past ttl\")",
"def test_maxttl_setting(self):\n maxttl = int(self.input.param(\"maxttl\", None))\n self.run_multi_operations(buckets = self.buckets,\n query_definitions = self.query_definitions,\n create_index = True, drop_index = False,\n query_with_explain = False, query = False)\n self.sleep(20)\n self._verify_bucket_count_with_index_count()\n self.sleep(maxttl, \"waiting for docs to be expired automatically per maxttl rule\")\n self._expiry_pager(self.master)\n self.sleep(60, \"wait for expiry pager to run on all nodes...\")\n for bucket in self.buckets:\n items = RestConnection(self.master).get_active_key_count(bucket)\n self.log.info(\"Docs in source bucket is {0} after maxttl has elapsed\".format(items))\n if items != 0:\n self.fail(\"Docs in source bucket is not 0 after maxttl has elapsed\")\n self._verify_bucket_count_with_index_count()",
"def test_update_bucket(self):\n pass",
"def test_max_items(self):\r\n timeline = Timeline(connection=self.c1, bucket=self.bucket, max_items=3)\r\n now = datetime.utcnow()\r\n\r\n timeline.add(self.key, 1, now)\r\n timeline.add(self.key, 2, now)\r\n timeline.add(self.key, 3, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)\r\n\r\n timeline.add(self.key, 4, now)\r\n self.assertEqual(len(timeline.get(self.key)), 3)",
"async def incr(req):\n key, ttl, err = validate_params(req)\n if err is not None:\n return err\n\n counter = incr_with_ttl(key, ttl)\n return web.json_response(data={'status': 'success', 'counter': counter})",
"def __init__(self, bucket_size, bucket_fill_rate, current_time=None):\n self.__bucket_contents = bucket_size\n self.__bucket_size = bucket_size\n self.__bucket_fill_rate = bucket_fill_rate\n\n if current_time is None:\n current_time = time.time()\n\n self.__last_bucket_fill_time = current_time",
"def put_object_retention(Bucket=None, Key=None, Retention=None, RequestPayer=None, VersionId=None, BypassGovernanceRetention=None, ContentMD5=None):\n pass",
"def post_bucketlist():\n pass",
"def test_check_existing_enqueues_tasks(self):\n collection = handlers_endpoints_v1.DigestCollection(\n namespace=handlers_endpoints_v1.Namespace())\n collection.items.append(\n generate_digest(collection.namespace.namespace, 'some content'))\n key = model.get_entry_key(\n collection.namespace.namespace, collection.items[0].digest)\n\n # guarantee that one digest already exists in the datastore\n model.new_content_entry(key).put()\n self.call_api('preupload', self.message_to_dict(collection), 200)\n\n # find enqueued tasks\n self.assertEqual(1, self.execute_tasks())",
"def progress_update(sent, total):\n l.debug(\"%d of %d Mb uploaded to Amazon S3.\", sent / 1000000, total / 1000000)",
"def test_cli_bucket_maxttl_setting(self):\n self.rest.force_eject_node()\n\n shell = RemoteMachineShellConnection(self.master)\n if self.input.param('enable_ipv6', False):\n self.reset_and_enable_ipv6(self.master)\n set_index_storage_type = \" --index-storage-setting=memopt \"\n options = ' --cluster-port=8091 \\\n --cluster-ramsize=300 \\\n --cluster-index-ramsize=300 \\\n --services=data,index,query %s ' \\\n % set_index_storage_type\n o, e = shell.execute_couchbase_cli(cli_command=\"cluster-init\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Cluster initialized')\n\n self.log.info(\"Add new user after reset node! \")\n self.add_built_in_server_user(node=self.master)\n bucket_type = self.input.param(\"bucket_type\", \"couchbase\")\n options = ' --bucket=default \\\n --bucket-type={0} \\\n --bucket-ramsize=200 \\\n --max-ttl=400 \\\n --wait '.format(bucket_type)\n o, e = shell.execute_couchbase_cli(cli_command=\"bucket-create\",\n options=options)\n self.assertEqual(o[0], 'SUCCESS: Bucket created')\n\n self.sleep(30, \"Sleep before loading doc using cbdocloader\")\n\n cluster_flag = \"-c\"\n bucket_quota_flag = \"-m\"\n data_set_location_flag = \"-d\"\n shell.execute_command(\n \"{0}cbdocloader -u Administrator -p password \"\n \"{3} {1} -b default {4} 100 {5} {2}travel-sample.zip\"\n .format(self.bin_path, self.master.ip, self.sample_path,\n cluster_flag, bucket_quota_flag,\n data_set_location_flag))\n shell.disconnect()\n\n buckets = RestConnection(self.master).get_buckets()\n for bucket in buckets:\n if bucket.name != \"default\":\n self.fail(\"default bucket did not get created\")\n\n \"\"\" check for load data into travel-sample bucket \"\"\"\n end_time = time.time() + 120\n num_actual = 0\n while time.time() < end_time:\n self.sleep(10)\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) == self.total_items_travel_sample:\n break\n self.assertTrue(int(num_actual) == self.total_items_travel_sample,\n \"Items number expected %s, actual %s\"\n % (self.total_items_travel_sample, num_actual))\n self.log.info(\"Total items %s \" % num_actual)\n self.sleep(400, \"Waiting for docs to expire as per maxttl\")\n self.expire_pager([self.master])\n self.sleep(20, \"Wait for expiry_purger to run\")\n num_actual = self.get_item_count(self.master, \"default\")\n if int(num_actual) != 0:\n self.fail(\"Item count is not 0 after maxttl has elapsed\")\n else:\n self.log.info(\"SUCCESS: Item count is 0 after maxttl has elapsed\")",
"def test_many_expired_keys(self):\n self.now = time.time()\n with patch('time.time', self.fake_time):\n for i in range(20):\n self.storage.set(i, i, moe=self.now + 1)\n self.now += 2\n self.gc.expire_random()\n for i in range(20):\n self.assertRaises(StorageKeyError, self.storage.get, i)",
"def create_thumbnails():\n bucket = BASE_BUCKET + ARG.MANIFOLD\n result = S3_CLIENT.list_objects(Bucket=bucket, Prefix=PREFIX + \"/\", Delimiter=\"/\")\n lev1 = result.get('CommonPrefixes')\n for lev1pre in tqdm(lev1, desc=\"Prefixes\"):\n bpre = lev1pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Prefixes\"] += 1\n #result2 = S3_CLIENT.list_objects(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n # Delimiter=\"/\")\n paginator = S3_CLIENT.get_paginator(\"list_objects\")\n pages = paginator.paginate(Bucket=bucket, Prefix=\"/\".join([PREFIX, bpre]) + \"/\",\n Delimiter=\"/\")\n for page in pages:\n COUNT[\"Pages\"] += 1\n lev2 = page.get('CommonPrefixes')\n for lev2pre in lev2:\n body = lev2pre.get('Prefix').split(\"/\")[-2]\n COUNT[\"Body IDs\"] += 1\n if ARG.WRITE:\n invoke_lambda(bucket, body)\n else:\n LOGGER.debug(\"/\".join([bucket, bpre, body]))\n print(COUNT)",
"def test_metering_database(self):\n self.with_deletion = self.input.param(\"delete\", False)\n self.db_name = \"%s-testmetering\" % self.db_name\n # validate initial throughput is 5000/3 = 1666\n for bucket in self.cluster.buckets:\n print(bucket.servers)\n self.assertEqual(self.bucket_util.get_throttle_limit(bucket),\n self.bucket_throttling_limit)\n\n # validate create, update, delete stat\n for op_type in [\"create\", \"update\"]:\n if op_type == \"create\":\n self.load_data(create_start=0, create_end=self.num_items, create_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if op_type == \"update\":\n self.load_data(update_start=0, update_end=self.num_items, update_perc=100, mutated=1)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)\n if self.with_deletion:\n self.log.info(\"performing delete operation\")\n self.load_data(delete_start=0, delete_end=self.num_items, delete_perc=100)\n self.update_expected_stat(self.key_size, self.doc_size,\n 0, self.num_items, self.cluster.buckets)",
"def handler(event, context):\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');",
"def update_bucketlist():\n pass",
"def __init__(__self__, *,\n bucket: str,\n kind: str,\n retention_interval: str,\n upload_interval: str):\n pulumi.set(__self__, \"bucket\", bucket)\n pulumi.set(__self__, \"kind\", kind)\n pulumi.set(__self__, \"retention_interval\", retention_interval)\n pulumi.set(__self__, \"upload_interval\", upload_interval)",
"def get_object_retention(Bucket=None, Key=None, VersionId=None, RequestPayer=None):\n pass",
"def upload(filename, bucket):\n k = Key(bucket)\n k.key = uuid.uuid1().hex\n print \"Uploading batch to {}, key: {}...\".format(bucket.name, k.key)\n k.set_contents_from_filename(filename, reduced_redundancy=True)\n print \" Done.\"\n \n\n\n bucket = openBucket(dest)",
"def do_rate_limited_ops(\n handle, num_seconds, do_writes, limit, max_rows, min_size, max_size):\n put_request = PutRequest().set_table_name(table_name)\n get_request = GetRequest().set_table_name(table_name)\n #\n # Generate a string of max_size with all \"x\"s in it\n #\n user_data = ''\n if do_writes:\n for x in range(max_size):\n user_data += 'x'\n\n start_time = int(round(time() * 1000))\n end_time = start_time + num_seconds * 1000\n\n print('Running continuous ' + ('writes' if do_writes else 'reads') +\n ' for ' + str(num_seconds) + ' seconds.')\n #\n # Keep track of how many units we used\n #\n units_used = 0\n #\n # With rate limiting enabled, we can find the amount of time our operation\n # was delayed due to rate limiting by getting the value from the result\n # using Result.get_rate_limit_delayed_ms().\n #\n delay_ms = 0\n\n key = dict()\n value = dict()\n while True:\n fld_id = int(random() * max_rows)\n try:\n if do_writes:\n value['id'] = fld_id\n value['sid'] = fld_id\n rec_size = int(random() * (max_size - min_size))\n rec_size += min_size\n value['name'] = user_data[:rec_size]\n put_request.set_value(value)\n put_result = handle.put(put_request)\n units_used += put_result.get_write_units()\n delay_ms += put_result.get_rate_limit_delayed_ms()\n else:\n key['id'] = fld_id\n key['sid'] = fld_id\n get_request.set_key(key)\n get_result = handle.get(get_request)\n units_used += get_result.get_read_units()\n delay_ms += get_result.get_rate_limit_delayed_ms()\n except WriteThrottlingException as wte:\n # We should not get WriteThrottlingException exception\n print('Got unexpected write throttling exception')\n raise wte\n except ReadThrottlingException as rte:\n # We should not get ReadThrottlingException exception\n print('Got unexpected read throttling exception')\n raise rte\n if int(round(time() * 1000)) >= end_time:\n break\n num_seconds = (int(round(time() * 1000)) - start_time) // 1000\n units_used /= num_seconds\n\n if units_used < int(limit * 0.8) or units_used > int(limit * 1.2):\n if do_writes:\n msg = ('Writes: expected around ' + str(limit) + ' WUs, got ' +\n str(units_used))\n else:\n msg = ('Reads: expected around ' + str(limit) + ' RUs, got ' +\n str(units_used))\n raise RuntimeError(msg)\n\n print(('Writes' if do_writes else 'Reads') + ': average usage = ' +\n str(units_used) + ('WUs' if do_writes else 'RUs') +\n ' (expected around ' + str(limit))\n\n print('Total rate limiter delay time = ' + str(delay_ms) + 'ms')",
"def test_bucket_is_updated(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 201)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['name'] == 'Adventure')\n self.assertEqual(data['id'], 1)",
"def testBucketSize(self):\n b = SomeBucket()\n fit = b.add(1000)\n self.assertEqual(100, fit)",
"def test01StoreExpiration(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 100):\n keys.append(s.Put(i, i))\n\n # This should not raise\n s.Get(keys[-1])\n\n # This should raise though\n self.assertRaises(KeyError, s.Get, keys[0])",
"def __init__(self):\n self.size = 1000\n self.bucket = [None] * self.size",
"def large_upload_collection(upload_items: List[JSONDict]) -> UploadCollection:\n items = []\n\n item = upload_items[0]\n for i in range(3050):\n copy = item.copy()\n copy[\"guid\"] = copy[\"guid\"].replace(\"post1\", f\"post{i}\")\n items.append(copy)\n\n collection = UploadCollection(items=items)\n return collection",
"def uploadFilestoS3(self):\n allfilesuploadedcount = 0\n for eachfiledic in self.fileTobeUploaded:\n if eachfiledic[\"uploadedSuccess\"] == 0: #Means this file never got uploaded.\n if os.path.getsize(eachfiledic[\"filepath\"]) < 1000000000: #<1GB\n s3Log.info (\"FileSize < 1GB for :{}, so using single part upload.\".format(eachfiledic[\"filepath\"]) )\n if self.singlePartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n else:\n s3Log.info (\"FileSize > 1GB for :{}, so using Multi Part upload. \\n\".format(eachfiledic[\"filepath\"]) )\n if self.multiPartUpload(eachfiledic) == True:\n eachfiledic[\"uploadedSuccess\"] = 1\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n\n elif eachfiledic[\"uploadedSuccess\"] == 1: #Means it got uploaded in the last run.\n allfilesuploadedcount = allfilesuploadedcount + 1\n\n self.saveStateOfThisRun()\n if len(self.fileTobeUploaded) == allfilesuploadedcount: #Means we uploaded all files in the queue\n return True\n else:\n return False",
"def expire(event, context):\n # scan the database for expired files\n expiry_at = datetime.utcnow() - runtime_context.NONSTORED_TIMEOUT\n files = FileModel.list_expired(expiry_at)\n # remove all files and all items one-by-one\n for file in files:\n file_id = file['id']['S']\n FileModel.update({\n 'id': file_id,\n 'deleted_at': datetime.utcnow()\n })\n LOGGER.debug('Files item updated (expired). service=ddb method=update_item id={}'.format(file_id))\n S3_CLIENT.delete_object(\n Bucket=runtime_context.BUCKET_NAME,\n Key=file_id\n )\n LOGGER.debug('S3 object deleted. service=s3 method=delete_object id={}'.format(file_id))"
] | [
"0.70659494",
"0.6699331",
"0.6606719",
"0.65539116",
"0.58606344",
"0.5722448",
"0.5579716",
"0.5559856",
"0.5538106",
"0.5445731",
"0.5444631",
"0.54434264",
"0.54030365",
"0.5382592",
"0.53746575",
"0.53511035",
"0.53469026",
"0.53101665",
"0.5309392",
"0.5298697",
"0.5277794",
"0.52463275",
"0.52454925",
"0.5243784",
"0.52329767",
"0.5169961",
"0.51501995",
"0.51417065",
"0.51326084",
"0.5132004"
] | 0.71103334 | 0 |
Use active_ids from the context to fetch the leads | def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
record_ids = context.get('active_ids', False)
res = super(crm_lead_stage, self).default_get(cr, uid, fields, context=context)
if record_ids:
opp_ids = []
opps = self.pool.get('crm.lead').browse(cr, uid, record_ids, context=context)
for opp in opps:
opp_ids.append(opp.id)
if 'lead_ids' in fields:
res.update({'lead_ids': opp_ids})
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def queryset(self, request):\n qs = super(AdRepLeadAdmin, self).queryset(request)\n qs = AdRepLead.objects.select_related().filter(id__in=qs\n ).defer('site__envelope',\n 'site__geom',\n 'site__point')\n return qs",
"def get_locations_by_ids(self, id_list):",
"def prepare_related_bulletins(self, object):\n roles = ActorRole.objects.filter(\n actor=object.id).filter(bulletin__isnull=False)\n\n related_bulletins = [\n '/api/v1/bulletin/{0}/'.format(b.id)\n for ar in roles\n for b in ar.bulletin_set.all()\n ]\n\n return related_bulletins",
"def _get_ads(self, params):\n return self._api.account.get_ads(params={**params, **self._state_filter()}, fields=[self.state_pk])",
"def leads(self):\n from hubspot3.leads import LeadsClient\n\n return LeadsClient(**self.auth, **self.options)",
"def get_activities():\n pass",
"def get_all(self, *ids):",
"def get_activity_list(self):\n return self._request_activity_list(self.athlete)",
"def queryset(self, request):\n qs = super(AdRepAdmin, self).queryset(request)\n qs = AdRep.objects.select_related().filter(id__in=qs\n ).defer('site__envelope',\n 'site__geom',\n 'site__point')\n return qs",
"def queryset(self, request):\n qs = super(AdRepConsumerAdmin, self).queryset(request)\n qs = AdRepConsumer.objects.select_related().filter(id__in=qs\n ).defer('consumer__site__envelope',\n 'consumer__site__geom',\n 'consumer__site__point')\n return qs",
"def queryset(self, request):\n qs = super(AdRepAdvertiserAdmin, self).queryset(request)\n qs = AdRepAdvertiser.objects.select_related().filter(id__in=qs\n ).defer('advertiser__site__envelope',\n 'advertiser__site__geom',\n 'advertiser__site__point')\n return qs",
"def fetchById(accountIdList):\n accounts= []\n url = accountsConfig['domain']\n for accId in accountIdList:\n r = requests.get(url +'/'+ str(accId), headers=accountsConfig['headers']).json()\n accounts.append(r)\n return accounts",
"def lead_list(request):\n if request.method == 'GET':\n snippets = Lead.objects.all()\n serializer = LeadSerializer(snippets, many=True)\n return JSONResponse(serializer.data)\n\n elif request.method == 'POST':\n data = JSONParser().parse(request)\n serializer = LeadSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JSONResponse(serializer.data, status=201)\n return JSONResponse(serializer.errors, status=400)",
"def queryset(self, request):\n qs = super(TwitterAccountAdmin, self).queryset(request)\n qs = TwitterAccount.objects.select_related().filter(id__in=qs\n ).defer('site__envelope', 'site__geom', 'site__point')\n return qs",
"def queryset(self, request):\n qs = super(AdRepSiteAdmin, self).queryset(request)\n qs = AdRepSite.objects.select_related().filter(\n id__in=qs).defer('site__envelope', 'site__geom', 'site__point')\n return qs",
"def fetch_from_db(self):\n self._potential_deals = DBApi.get_instance().potential_records\n self._filters = DBApi.get_instance().filters\n # Add markdown for url\n for data in self._potential_deals:\n data[\"url\"] = f\"[Link]({data['url']})\"\n self._potential_deals_cols = self._db_api.get_potential_deal_columns()\n self._years = self._db_api.get_unique_years(self._potential_deals)\n self._make_model = self._db_api.get_all_make_models()\n self._action_options = [\"Action1\", \"Action2\", \"Action3\"]",
"def get_deals_list(self, session) -> List:\n\n deals = session.query(\n Deals.id,\n Deals.linkedin,\n Deals.leadgen_id\n ).all()\n\n return deals",
"def default_get(self, cr, uid, fields, context=None):\n if context is None:\n context = {}\n\n exchang_obj = self.pool.get('exchange.order')\n res ={}\n exchang_ids = context.get('active_ids', [])\n if not exchang_ids:\n return res\n\n result = []\n for req in exchang_obj.browse(cr, uid, exchang_ids, context=context):\n for product in req.order_line:\n result.append(self.__create_products(product))\n res.update({'products_ids': result})\n if 'current_date' in fields:\n res.update({'current_date': time.strftime('%Y-%m-%d %H:%M:%S')})\n return res",
"def get_queryset(self):\n qs = super(JobActiveMixin, self).get_queryset()\n return qs.actives()",
"def list(self,request,*args,**kwargs):\n response=super(ListAPIView,self).list(request,*args,**kwargs)\n #add applied_filters to the response which is set when filter_queryset method is called\n response=self.addAppliedFilters(response)\n #fetch data from the related views\n return self.fetch_related(request,response,*args,**kwargs)",
"def _links_get(self, cr, uid, context=None):\n obj = self.pool.get('res.request.link')\n ids = obj.search(cr, uid, [])\n res = obj.read(cr, uid, ids, ['object', 'name'], context)\n return [(r['object'], r['name']) for r in res]",
"def get_activities(cls):\n objs = cls.objects\n return objs",
"def list(self, request):\n\n records = filter_against_records(request)\n \n if 'faculty_id' in request.query_params:\n faculty = Faculties.objects.filter(id=request.query_params.get('faculty_id'))[0]\n departments = Departments.objects.filter(faculty_id=model_to_dict(faculty)['id'])\n for department in departments:\n education_programs = EducationPrograms.objects.filter(main_department_id=model_to_dict(department)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'department_id' in request.query_params:\n department = Departments.objects.filter(id=request.query_params.get('department_id'))[0]\n education_programs = EducationPrograms.objects.filter(main_department_id=model_to_dict(department)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'start_year_id' in request.query_params:\n start_year = StartYears.objects.filter(id=request.query_params.get('start_year_id'))[0]\n education_programs = EducationPrograms.objects.filter(start_year_id=model_to_dict(start_year)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'specialization_id' in request.query_params:\n specialization = Specializations.objects.filter(id=request.query_params.get('specialization_id'))[0]\n education_programs = EducationPrograms.objects.filter(specialization_id=model_to_dict(specialization)['id'])\n new_records_id = []\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'education_level_id' in request.query_params:\n education_level = EducationLevels.objects.filter(id=request.query_params.get('education_level_id'))[0]\n education_programs = EducationPrograms.objects.filter(education_level_id=model_to_dict(education_level)['id'])\n for education_program in education_programs:\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'education_program_id' in request.query_params:\n education_program = 
EducationPrograms.objects.filter(id=request.query_params.get('education_program_id'))[0]\n groups = Groups.objects.filter(education_program_id=model_to_dict(education_program)['id'])\n new_records_id = []\n for group in groups:\n for record in records:\n if model_to_dict(group)['id'] == model_to_dict(record)['group_id']:\n new_records_id.append(model_to_dict(record)['id'])\n records = records.filter(id__in=new_records_id)\n\n if 'student_id' in request.query_params:\n records = records.filter(student_id=request.query_params.get('student_id'))\n\n \n\n \n \n \n students = Students.objects.all()\n res = []\n for student in students:\n student_records = records.filter(student_id=model_to_dict(student)['id'])\n if len(student_records) > 0:\n res.append(student)\n\n return Response(normalize_students(res))",
"def get_queryset(self):\r\n if self.request.user.is_authenticated:\r\n activitystreams = ActivityStream.objects.all().order_by('created').reverse()\r\n follows = Follow.objects.filter(source=self.request.user).all()\r\n from subscription.models import Subscription\r\n subscriptions = Subscription.objects.filter(user=self.request.user).all()\r\n subscribed_communities = []\r\n to_be_filtered = []\r\n follow_targets = []\r\n for subscription in subscriptions:\r\n subscribed_communities.append(str(subscription.community.id))\r\n for follow in follows:\r\n follow_targets.append(str(follow.target.id))\r\n for activitystream in activitystreams:\r\n json_data = json.loads(activitystream.data)\r\n if \"actor\" in json_data:\r\n json_actor = json_data['actor']\r\n if \"http://\" + SERVER_ADDRESS + \"/users/view/\" in json_actor:\r\n json_actor = json_actor.replace(\"http://\" + SERVER_ADDRESS + \"/users/view/\", \"\")\r\n if str(json_actor) in follow_targets:\r\n to_be_filtered.append(activitystream.id)\r\n if \"target\" in json_data:\r\n json_target = json_data['target']\r\n if \"http://\" + SERVER_ADDRESS + \"/users/view/\" in json_target:\r\n json_target = json_target.replace(\"http://\" + SERVER_ADDRESS + \"/users/view/\", \"\")\r\n if str(json_target) == str(self.request.user.id):\r\n to_be_filtered.append(activitystream.id)\r\n if \"http://\" + SERVER_ADDRESS + \"/communities/\" in json_target:\r\n json_target = json_target.replace(\"http://\" + SERVER_ADDRESS + \"/communities/\", \"\")\r\n if json_target in subscribed_communities:\r\n to_be_filtered.append(activitystream.id)\r\n activitystreams = activitystreams.filter(id__in=to_be_filtered)\r\n return activitystreams\r\n else:\r\n return None",
"def get_queryset(self):\n return Participant.active.all()",
"def fetch_activities(access_token):\n\n headers = {\"Authorization\": \"Bearer \" + access_token}\n\n # Fetch list of athlete's activities\n activities = []\n page = 1\n while True:\n params = {\"per_page\": MAX_ACTIVITIES_PER_PAGE, \"page\": page}\n r = requests.get(API_URL + \"/athlete/activities\", headers=headers, params=params)\n new_activities = r.json()\n\n if \"errors\" in new_activities:\n raise AuthError(new_activities[\"message\"])\n activities.extend(new_activities)\n\n # Continue fetching activities if necessary\n if len(new_activities) == MAX_ACTIVITIES_PER_PAGE:\n page += 1\n else:\n break\n\n return activities",
"async def get_contacts_for_active_monitor(dbcon: DBConnection, monitor_id: int) -> Iterable[object_models.Contact]:\n q = \"\"\"select\n contacts.id, contacts.name, contacts.email, contacts.phone, contacts.active\n from active_monitor_contacts, contacts\n where active_monitor_contacts.active_monitor_id = %s\n and active_monitor_contacts.contact_id = contacts.id\"\"\"\n contacts = [object_models.Contact(*row) for row in await dbcon.fetch_all(q, (monitor_id,))]\n return contacts",
"def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json",
"def activities(self, start=None, limit=None, done=None, exclude=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/activities'.format(self.get_url())\r\n return http.Request('GET', url, params), parsers.parse_json"
] | [
"0.6057764",
"0.52192557",
"0.5219104",
"0.50897825",
"0.5034502",
"0.5022137",
"0.4999737",
"0.49886566",
"0.49597502",
"0.4957497",
"0.49499336",
"0.49414816",
"0.49347013",
"0.489892",
"0.4863161",
"0.48234197",
"0.4823024",
"0.48136124",
"0.481186",
"0.47834936",
"0.47805104",
"0.4776128",
"0.47657222",
"0.47245422",
"0.46960744",
"0.46852517",
"0.46827942",
"0.46762693",
"0.46762693",
"0.46762693"
] | 0.69877815 | 0 |
Use lead_ids from the wizard and set them to the new stage | def action_multi_lead_stage(self, cr, uid, ids, context=None):
if context is None:
context = {}
wizard = self.browse(cr, uid, ids[0], context=context)
lead_ids = wizard.lead_ids
if lead_ids:
for lead in lead_ids:
self.pool.get('crm.lead').write(cr, uid, [lead.id], {'stage_id':wizard.stage_id.id},context)
return {'type': 'ir.actions.act_window_close'} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _onchange_stage_id_values(self, stage_id):\n if not stage_id:\n return {}\n print('1111')\n\n call_attempt = len(self.env['call.attempt'].browse(self.call_attempt_ids))\n call_pitch = len(self.env['call.pitch'].browse(self.call_pitch_ids))\n contact_meeting = len(self.env['contact.meeting'].browse(self.contact_meeting_ids))\n # file_attached = len(self.env['ir.attachment'].search([('res_model','=','res.partner'),('res_id','=',self.id)]))\n msg=''\n ## file attached\n file_attached = len(\n self.env['ir.attachment'].search([('res_model', '=', 'res.partner'), ('res_id', '=', self.id)]))\n if self.stage_id.id in (8, 16) and file_attached == 0:\n msg = msg + ' - Upload at least one file \\n'\n ##\n if self.stage_id.id == 2 and call_attempt == 0:\n msg = msg + ' - Call Attempt \\n'\n\n if self.stage_id.id == 3 and call_pitch == 0:\n msg = msg + ' - Call Pitch \\n'\n\n if self.stage_id.id == 9 and self.date_call_back_one == False:\n msg = msg + ' - Date (callback) '\n\n if self.stage_id.id == 10 and self.date_meeting_set == False:\n msg = msg + ' - Date (meeting set) \\n'\n\n if self.stage_id.id == 6 and self.date_preagreement == False:\n msg = msg + ' - Date (pre_agreement) \\n'\n\n ## individual and company contact\n if self.stage_id.id in (8,16) and self.mobile == False:\n msg = msg + ' - Mobile \\n'\n if self.stage_id.id in (8,16) and self.email == False:\n msg = msg + ' - Email \\n'\n if self.stage_id.id in (8, 16) and self.street == False:\n msg = msg + ' - Street in Adress \\n'\n if self.stage_id.id in (8,16) and self.lang == False:\n msg = msg + ' - Language \\n'\n if self.stage_id.id in (8, 16) and self.business_developer_id == False:\n msg = msg + ' - Business Developer \\n'\n if self.stage_id.id in (8,16) and self.vat == False:\n msg = msg + ' - TIN \\n'\n\n ## individual contact\n if self.stage_id.id in (8,16) and self.parent_id and self.parent_id.street== False:\n msg = msg + ' - Invoicing Address (Company Adress) \\n'\n if self.stage_id.id in (8,16) and self.inami == False:\n msg = msg + ' - INAMI \\n'\n if self.stage_id.id in (8,16) and self.subscription_type == False:\n msg = msg + ' - Subscription Type \\n'\n if self.stage_id.id in (8,16) and not self.title and self.is_company != True:\n msg = msg + ' - Title \\n'\n if self.stage_id.id in (8,16) and self.specialization == False:\n msg = msg + ' - Specialization \\n'\n ### Prospection process\n if self.stage_id.id in (8,16) and self.date_signed == False:\n msg = msg + ' - Date(Signed) \\n'\n if self.stage_id.id in (8, 16) and self.bd_signed == False:\n msg = msg + ' - Business Developer (Signed) \\n'\n if self.stage_id.id in (8, 16) and self.comment_signed == False:\n msg = msg + ' - Comment (Signed) \\n'\n\n ### Subscription details\n if self.stage_id.id in (8,16) and self.subscription_month == False:\n msg = msg + ' - Monthly subscription \\n'\n if self.stage_id.id in (8,16) and self.subscription_commitment == False:\n msg = msg + ' - Commitment \\n'\n if self.stage_id.id in (8,16) and self.subscription_upfront_payment == False:\n msg = msg + ' - Upfront Payment \\n'\n if self.stage_id.id in (8,16) and self.subscription_upfront_turnover == False:\n msg = msg + ' - Upfront turnover \\n'\n if self.stage_id.id in (8,16) and self.subsciption_part_condition == False:\n msg = msg + ' - Particular Conditions \\n'\n\n ## stage activated and only individuals\n if self.stage_id.id == 16 and self.doctor_admin == False:\n msg = msg + ' - Doctor AdminID \\n'\n ### stage account managment\n if self.stage_id.id == 16 and self.first_email 
== False:\n msg = msg + ' - 1st email (activation) \\n'\n if self.stage_id.id == 16 and self.service_completed == False:\n msg = msg + ' - Services completed \\n'\n if self.stage_id.id == 16 and self.price_completed == False:\n msg = msg + ' - Prices completed \\n'\n if self.stage_id.id == 16 and self.cv_completed == False:\n msg = msg + ' - CV/experiences completed \\n'\n if self.stage_id.id == 16 and self.duration_completed == False:\n msg = msg + ' - Duration completed \\n'\n if self.stage_id.id == 16 and self.personal_message_completed == False:\n msg = msg + ' - Personal message completed \\n'\n if self.stage_id.id == 16 and self.profile_picture == False:\n msg = msg + ' - Profile picture \\n'\n if self.stage_id.id == 16 and self.photo_practice == False:\n msg = msg + ' - Photo Practice \\n'\n if self.stage_id.id == 16 and self.marketing_kit == False:\n msg = msg + ' - Marketing kit \\n'\n if self.stage_id.id == 16 and self.synchronisation_completed == False:\n msg = msg + ' - Synchronization \\n'\n if self.stage_id.id == 16 and self.backlink == False:\n msg = msg + ' - Backlink \\n'\n if self.stage_id.id == 16 and self.google_profile == False:\n msg = msg + ' - Google profile \\n'\n if self.stage_id.id == 16 and self.voicemail == False:\n msg = msg + ' - Voicemail \\n'\n if self.stage_id.id == 16 and self.mail_signature == False:\n msg = msg + ' - Mail signature \\n'\n if self.stage_id.id == 16 and self.email_to_patient == False:\n msg = msg + ' - Email to patient \\n'\n if self.stage_id.id == 16 and self.translation == False:\n msg = msg + ' - Translation \\n'\n if self.stage_id.id == 16 and self.business_card == False:\n msg = msg + ' - Manuel Sent \\n'\n if self.stage_id.id == 16 and self.manuel_sent == False:\n msg = msg + ' - Business cards \\n'\n if self.stage_id.id == 16 and self.widget == False:\n msg = msg + ' - Widget \\n'\n if self.stage_id.id == 16 and self.voice_mail == False:\n msg = msg + ' - Voicemail + email signature \\n'\n if self.stage_id.id == 16 and self.website_ok == False:\n msg = msg + ' - Website \\n'\n if self.stage_id.id == 16 and self.customer_service_number == False:\n msg = msg + ' - Customer service number on google profile \\n'\n if self.stage_id.id == 16 and self.website_backlink == False:\n msg = msg + ' - Backlink on website \\n'\n\n ## Lost paying, tab lost\n if self.stage_id.id == 17 and self.date_lost == False:\n msg = msg + ' - Lost Date \\n'\n if self.stage_id.id == 17 and self.reason_lost == False:\n msg = msg + ' - Lost Reason \\n'\n\n\n\n\n ##\n if msg:\n raise ValidationError('To move to this step you first need to fill those fields : \\n' + msg)\n\n return {}",
"def increment_stage_in_forms(forms):\n for index, form in enumerate(forms.all(), 1):\n form.stage = index\n form.save(update_fields=['stage'])",
"def default_get(self, cr, uid, fields, context=None):\n if context is None:\n context = {}\n record_ids = context.get('active_ids', False)\n res = super(crm_lead_stage, self).default_get(cr, uid, fields, context=context)\n\n if record_ids:\n opp_ids = []\n opps = self.pool.get('crm.lead').browse(cr, uid, record_ids, context=context)\n for opp in opps:\n opp_ids.append(opp.id)\n if 'lead_ids' in fields:\n res.update({'lead_ids': opp_ids})\n\n return res",
"def stage(self, id, title = None):\r\n if id != self.lastStage:\r\n if title:\r\n REGISTRY['CIF'].write('Step %s: %s' % (id, title))\r\n self.callstack.setStage(id,title)\r\n else:\r\n REGISTRY['CIF'].write('Step %s' % id)\r\n self.callstack.setStage(id,\" \")\r\n \r\n if self.stepByStep:\r\n cmd = MsgHelper.createMessage(Messages.CMD_PAUSE)\r\n self.mailbox.push( cmd, high_priority = True )\r\n self.lastStage = id",
"def setup(self, stage: Optional[str] = None) -> None:",
"def _stage(self):\n\n pass",
"def stage(self, stage: osbuild.Stage):",
"def _read_group_stage_ids(self, stages, domain, order):\n stage_ids = self.env['salon.stage'].search([])\n return stage_ids",
"def prepare_staging_area(sr_path, staging_path, vdi_uuids, seq_num=0):\n for vdi_uuid in vdi_uuids:\n source = os.path.join(sr_path, \"%s.vhd\" % vdi_uuid)\n link_name = os.path.join(staging_path, \"%d.vhd\" % seq_num)\n _link(source, link_name)\n seq_num += 1",
"def update_tracking_tool(self, new):\n print(f\"Update tracking_tool in preproc/reg stages to {new}\")\n self.stages[\"Preprocessing\"].config.tracking_tool = new\n self.stages[\"Registration\"].config.tracking_tool = new",
"def step_workflow(self):\n from odoo.addons.vneuron_workflow_odoo import workflow\n for res_id in self.ids:\n workflow.trg_write(self._uid, self._name, res_id, self._cr)\n return True",
"def update_preprocessing_act(self, new):\n print(f\"Update act_tracking in preproc/reg stages to {new}\")\n self.stages[\"Preprocessing\"].config.act_tracking = new\n self.stages[\"Registration\"].config.act_tracking = new\n if not new:\n self.stages[\"Preprocessing\"].config.gmwmi_seeding = False\n self.stages[\"Registration\"].config.gmwmi_seeding = False",
"def stage(self, stage):\n self._stage = stage\n self._layer = Sdf.Layer.CreateAnonymous()\n self._stage.GetSessionLayer().subLayerPaths.append(self._layer.identifier)",
"def set_stage(stage):\n try:\n filename = os.path.join(get_var('SITE'), \".stage\")\n f = open(filename, \"w\")\n f.write(\"%s\\n\" % stage)\n f.close()\n logger.debug(\"set stage: %s\" % (stage))\n except:\n raise AssertionError(\"Unable to save setup/teardown stage! %s\" % (sys.exc_info()[1]))\n return stage",
"def stage(self):\n pass",
"def expand_sdf(stages, context):\n # type: (Iterable[Stage], TransformContext) -> Iterator[Stage]\n for stage in stages:\n transform = only_transform(stage.transforms)\n if transform.spec.urn == common_urns.primitives.PAR_DO.urn:\n\n pardo_payload = proto_utils.parse_Bytes(\n transform.spec.payload, beam_runner_api_pb2.ParDoPayload)\n\n if pardo_payload.restriction_coder_id:\n\n def copy_like(protos, original, suffix='_copy', **kwargs):\n if isinstance(original, str):\n key = original\n original = protos[original]\n else:\n key = 'component'\n new_id = unique_name(protos, key + suffix)\n protos[new_id].CopyFrom(original)\n proto = protos[new_id]\n for name, value in kwargs.items():\n if isinstance(value, dict):\n getattr(proto, name).clear()\n getattr(proto, name).update(value)\n elif isinstance(value, list):\n del getattr(proto, name)[:]\n getattr(proto, name).extend(value)\n elif name == 'urn':\n proto.spec.urn = value\n elif name == 'payload':\n proto.spec.payload = value\n else:\n setattr(proto, name, value)\n if 'unique_name' not in kwargs and hasattr(proto, 'unique_name'):\n proto.unique_name = unique_name(\n {p.unique_name\n for p in protos.values()},\n original.unique_name + suffix)\n return new_id\n\n def make_stage(base_stage, transform_id, extra_must_follow=()):\n # type: (Stage, str, Iterable[Stage]) -> Stage\n transform = context.components.transforms[transform_id]\n return Stage(\n transform.unique_name, [transform],\n base_stage.downstream_side_inputs,\n union(base_stage.must_follow, frozenset(extra_must_follow)),\n parent=base_stage.name,\n environment=base_stage.environment)\n\n main_input_tag = only_element(\n tag for tag in transform.inputs.keys()\n if tag not in pardo_payload.side_inputs)\n main_input_id = transform.inputs[main_input_tag]\n element_coder_id = context.components.pcollections[\n main_input_id].coder_id\n # Tuple[element, restriction]\n paired_coder_id = context.add_or_get_coder_id(\n beam_runner_api_pb2.Coder(\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=common_urns.coders.KV.urn),\n component_coder_ids=[\n element_coder_id, pardo_payload.restriction_coder_id\n ]))\n # Tuple[Tuple[element, restriction], double]\n sized_coder_id = context.add_or_get_coder_id(\n beam_runner_api_pb2.Coder(\n spec=beam_runner_api_pb2.FunctionSpec(\n urn=common_urns.coders.KV.urn),\n component_coder_ids=[\n paired_coder_id,\n context.add_or_get_coder_id(\n # context can be None here only because FloatCoder does\n # not have components\n coders.FloatCoder().to_runner_api(None), # type: ignore\n 'doubles_coder')\n ]))\n\n paired_pcoll_id = copy_like(\n context.components.pcollections,\n main_input_id,\n '_paired',\n coder_id=paired_coder_id)\n pair_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/PairWithRestriction',\n urn=common_urns.sdf_components.PAIR_WITH_RESTRICTION.urn,\n outputs={'out': paired_pcoll_id})\n\n split_pcoll_id = copy_like(\n context.components.pcollections,\n main_input_id,\n '_split',\n coder_id=sized_coder_id)\n split_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/SplitAndSizeRestriction',\n urn=common_urns.sdf_components.SPLIT_AND_SIZE_RESTRICTIONS.urn,\n inputs=dict(transform.inputs, **{main_input_tag: paired_pcoll_id}),\n outputs={'out': split_pcoll_id})\n\n reshuffle_stage = None\n if common_urns.composites.RESHUFFLE.urn in context.known_runner_urns:\n reshuffle_pcoll_id = copy_like(\n context.components.pcollections,\n 
main_input_id,\n '_reshuffle',\n coder_id=sized_coder_id)\n reshuffle_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/Reshuffle',\n urn=common_urns.composites.RESHUFFLE.urn,\n payload=b'',\n inputs=dict(transform.inputs, **{main_input_tag: split_pcoll_id}),\n outputs={'out': reshuffle_pcoll_id})\n reshuffle_stage = make_stage(stage, reshuffle_transform_id)\n else:\n reshuffle_pcoll_id = split_pcoll_id\n reshuffle_transform_id = None\n\n if context.is_drain:\n truncate_pcoll_id = copy_like(\n context.components.pcollections,\n main_input_id,\n '_truncate_restriction',\n coder_id=sized_coder_id)\n # Lengthprefix the truncate output.\n context.length_prefix_pcoll_coders(truncate_pcoll_id)\n truncate_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/TruncateAndSizeRestriction',\n urn=common_urns.sdf_components.TRUNCATE_SIZED_RESTRICTION.urn,\n inputs=dict(\n transform.inputs, **{main_input_tag: reshuffle_pcoll_id}),\n outputs={'out': truncate_pcoll_id})\n process_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/Process',\n urn=common_urns.sdf_components.\n PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS.urn,\n inputs=dict(\n transform.inputs, **{main_input_tag: truncate_pcoll_id}))\n else:\n process_transform_id = copy_like(\n context.components.transforms,\n transform,\n unique_name=transform.unique_name + '/Process',\n urn=common_urns.sdf_components.\n PROCESS_SIZED_ELEMENTS_AND_RESTRICTIONS.urn,\n inputs=dict(\n transform.inputs, **{main_input_tag: reshuffle_pcoll_id}))\n\n yield make_stage(stage, pair_transform_id)\n split_stage = make_stage(stage, split_transform_id)\n yield split_stage\n if reshuffle_stage:\n yield reshuffle_stage\n if context.is_drain:\n yield make_stage(\n stage, truncate_transform_id, extra_must_follow=[split_stage])\n yield make_stage(stage, process_transform_id)\n else:\n yield make_stage(\n stage, process_transform_id, extra_must_follow=[split_stage])\n\n else:\n yield stage\n\n else:\n yield stage",
"def stage_set_send_note(self, cr, uid, ids, stage_id, context=None):\n stage_name = self.pool.get('crm.case.stage').name_get(cr, uid, [stage_id], context=context)[0][1]\n return self.message_post(cr, uid, ids, body= _(\"Stage changed to <b>%s</b>.\") % (stage_name), context=context)",
"def stage(self, stage_id):\r\n return pipelines.Stage(self, stage_id)",
"def stages(self, stages):\n if stages is None:\n self._stages = None\n else:\n self._stages = stages if isinstance(stages, list) else [stages] * len(self.pidevice.allaxes)\n debug('ControllerStartup.stages = %s', itemstostr(self._stages))",
"def step_impl_the_ru_is_set_to(context, business_id):\n context.bdd_helper.message_data[\"business_id\"] = business_id",
"def create_stage(self, ApiId: str, StageName: str, AccessLogSettings: Dict = None, ClientCertificateId: str = None, DefaultRouteSettings: Dict = None, DeploymentId: str = None, Description: str = None, RouteSettings: Dict = None, StageVariables: Dict = None) -> Dict:\n pass",
"def on12Lead(self, event): # wxGlade: DAQPanel.<event_handler>\n CreateDialog2 = Lead12Dialog2(self,self)\n CreateDialog2.ShowModal()",
"def prepareFinishSlot(self):\r\n \r\n self.lockIndex = self._wizard.targetIndexes[0]\r\n self._targetRepositoryModel.lock([self.lockIndex])",
"def setValuesInStep(\n self, stepName: str, interactionProperty: str = \"\", contactControls: str = \"\"\n ):\n pass",
"def setValuesInStep(\n self, stepName: str, interactionProperty: str = \"\", contactControls: str = \"\"\n ):\n pass",
"def Move_Stage(self):\n for i in range(3):\n if self.set_pos[i] == 0:\n continue\n print \"Moving stage %s by %s steps\\n\"%(self.POS_NAME[i], self.set_pos[i])\n self.ser.write('F,C'+self.STEPPER_NAME[i]+str(self.set_pos[i])+',R')\n time.sleep(0.5)\n time.sleep(0.5)\n return",
"def set_fill_stages(self: _SelfType, val: Tuple[str]) -> _SelfType:\n self._fill_stages = val\n return self",
"def setup_pivot():\n for piv_switcher in get_one_switcher():\n piv_switcher.setup()",
"def test_workflows_id_replace_post(self):\n pass",
"def post_stage(self):\n\n\t\tif self.stage == self.stages.declarations:\n\t\t\t# Prepare for output waveform generators.\n\t\t\tfor output in [var for var, type in self.variables.items() if type == 'output']:\n\t\t\t\tself.generators[output] = None\n\t\t\t\tself.waveforms[output] = None\n\n\t\t\t# Generate labels for all necessary values.\n\t\t\tself.all_values = set()\n\t\t\tfor name, type in self.variables.items():\n\t\t\t\tif type == 'pulse':\n\t\t\t\t\tfor attr in ['amplitude', 'length', 'shape']:\n\t\t\t\t\t\tself.all_values.add((name, attr))\n\t\t\t\telif type == 'acq_marker':\n\t\t\t\t\tfor attr in ['marker_num', 'output']:\n\t\t\t\t\t\tself.all_values.add((name, attr))\n\t\t\t\telif type != 'output':\n\t\t\t\t\tself.all_values.add((name,))\n\t\telif self.stage == self.stages.waveforms:\n\t\t\t# Finalize waveform creation.\n\t\t\tfor output in self.generators:\n\t\t\t\tself.waveforms[output] = self.generators[output].waveform"
] | [
"0.58470714",
"0.55518115",
"0.5546699",
"0.5379987",
"0.53570205",
"0.5289893",
"0.5229131",
"0.5181128",
"0.5130612",
"0.5128466",
"0.50979745",
"0.5055515",
"0.505321",
"0.50443345",
"0.50421935",
"0.50367343",
"0.503288",
"0.49982783",
"0.497238",
"0.49644795",
"0.49617615",
"0.49610013",
"0.49151117",
"0.4867824",
"0.4867824",
"0.4816936",
"0.48104182",
"0.4807522",
"0.48037204",
"0.47937816"
] | 0.64389664 | 0 |
A view to add a new portfolio project | def add_project(request):
    if not request.user.is_superuser:
        messages.error(request, 'Sorry, only store owners can do that.')
        return redirect(reverse('home'))

    if request.method == 'POST':
        form = ProjectForm(request.POST, request.FILES)
        if form.is_valid():
            project = form.save()
            messages.success(request, 'Project added successfully!')
            return redirect(reverse('portfolio'))
        else:
            messages.error(request, 'Failed to add project. Please ensure the form is valid.')
    else:
        form = ProjectForm()

    template = 'portfolio/add_project.html'
    context = {
        'form': form,
    }

    return render(request, template, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_project():\n if request.method == \"POST\":\n result = add_project_to_db(\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n return render_template(\"add_project.html\")",
"def add_project(request):\n\n profile = get_object_or_404(Profile, user=request.user)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n project_form = ProjectForm(request.POST, request.FILES)\n if project_form.is_valid():\n project = project_form.save(commit=False)\n project.owner = profile\n project.save()\n messages.success(request, 'Successfully created project!')\n return redirect(reverse('project_detail', args=[project.id]))\n else:\n messages.error(\n request,\n 'Failed to create project. Please ensure the form is valid'\n )\n\n project_form = ProjectForm()\n\n template = 'gameproject/add_project.html'\n context = {\n 'project_form': project_form,\n }\n\n return render(request, template, context)",
"def newproject_view(request):\n\n # Use to tell to the template that the user want to creat a new project\n is_new = True\n\n # Get all the user. Everyone may be member of the project\n users = User.objects.all()\n\n # If the view received data, try to creat a project\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Save the new project in the database\n form.save(commit=True)\n\n # redirect to the project list display page\n return redirect(\"projects\")\n else:\n # creat an empty form for the template\n form = ProjectForm(request.user)\n\n return render(request, 'newProject.html', locals())",
"def add_project():\n \n if 'username' in session: \n form=ProjectForm()\n \n if request.method == 'POST':\n if form.validate_on_submit():\n user = mongo.db.user.find_one({'username': session['username']})\n mongo.db.projects.insert_one({'username': user['username'],\n 'date': datetime.utcnow(),\n 'title': form.title.data,\n 'deadline': datetime.strptime(form.deadline.data, \"%d/%m/%Y\"),\n 'brief': form.brief.data,\n 'status': \"open\",\n 'note': form.note.data,\n 'user_id': user['_id']\n })\n \n flash('Your project has been created.', 'success')\n return redirect(url_for('projects'))\n \n return render_template('pages/addproject.html', title='New Project', form=form, legend=\"Add a project\")\n \n flash('You need to be logged in to post any content.', 'info')\n return redirect(url_for('login'))",
"def get_add_project_form():\n\n return render_template(\"project_add.html\")",
"def create_project(request):\n if request.method == \"POST\":\n temp = json.loads(request.body)\n form = ProjectForm(temp)\n\n # check whether it's valid:\n if form.is_valid():\n prj_obj = form.save(commit=False)\n # prj_obj.description = bleach.clean(prj_obj.description, strip=True)\n # fint the user profile object based on the email in session\n user_profile = UserProfile.objects.get(email=request.session['email'])\n prj_obj.user = user_profile\n # Save the project object - project needs to exist before\n # manytomany field is accessed.\n prj_obj.save()\n # get the list of tag objects to add to project\n tag_objects_list = _get_tags(form.cleaned_data['tags_list'])\n article_object_list = _get_articles(form.cleaned_data['articles'])\n for tag_object in tag_objects_list:\n prj_obj.tags.add(tag_object)\n for article_object in article_object_list:\n prj_obj.articles.add(article_object)\n prj_obj.save()\n return HttpResponse(str(prj_obj.id))\n # return HttpResponseRedirect('/projects/' + str(prj_obj.id))\n else:\n print form.errors.as_data()\n else:\n # Remove when front end updated.\n form = ProjectForm()\n return render(request, 'projects/create_project.html', {'form': form})",
"def get_project_add_form():\n\n return render_template(\"project_add.html\")",
"def post_project():\n\n title = request.form.get('title')\n description = request.form.get('description')\n max_grade = request.form.get('max_grade')\n\n hackbright.make_new_project(title, description, max_grade)\n\n flash(\"Successfully added new project.\")\n\n return redirect(\"/project?title={}\".format(title))",
"def add_project(project):\n print('add_project: ' + str(project))\n try_insert_or_update(models.projects.insert(), # pylint: disable=no-value-for-parameter\n [dict(\n name=project['name'], path=project['name'], active=True, user_id=current_user.id)])\n return",
"def project():\n\n ADMIN = current.session.s3.system_roles.ADMIN\n\n menu = M(c=\"project\")(\n M(\"Projects\", f=\"project\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Locations\", f=\"location\")(\n M(\"Map\", m=\"map\"),\n M(\"Contacts\", f=\"location_contact\"),\n ),\n M(\"Reports\", f=\"location\", m=\"report\")(\n M(\"3W\", f=\"location\", m=\"report\"),\n M(\"Beneficiaries\", f=\"beneficiary\", m=\"report\"),\n #M(\"Indicators\", f=\"indicator\", m=\"report\",\n # check=indicators,\n # ),\n #M(\"Indicators over Time\", f=\"indicator\", m=\"timeplot\",\n # check=indicators,\n # ),\n M(\"Funding\", f=\"organisation\", m=\"report\"),\n ),\n M(\"Import\", f=\"project\", m=\"import\", p=\"create\", restrict=[ADMIN])(\n M(\"Import Projects\", m=\"import\", p=\"create\"),\n M(\"Import Project Organizations\", f=\"organisation\",\n m=\"import\", p=\"create\"),\n M(\"Import Project Communities\", f=\"location\",\n m=\"import\", p=\"create\"),\n ),\n M(\"Activity Types\", f=\"activity_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Beneficiary Types\", f=\"beneficiary_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Sectors\", f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Themes\", f=\"theme\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )\n\n return menu",
"def portfolio_detail():\n return render_template('portfolio/portfolio.html')",
"def add_project(project, taglist):\n if anonymize:\n import random\n project['name'] = 'Anonimized Project ' + str(project['id'])[-3:]\n project['client'] = 'Anonimized Client'\n\n wf.add_item(title=project['name'],\n subtitle='Client: ' +\n project['client'] +\n ' Hit ENTER to show menu, press ALT for more info.',\n modifier_subtitles={\n 'alt': 'Tags: ' + ', '.join(taglist),\n },\n arg=str(project['id']),\n valid=True,\n icon='icons/project_{0}.png'.format(\n project['project_state']).lower(),\n copytext=project['name'])",
"def get_projects_route():\n response_object = {'status': 'success'}\n if request.method == 'POST':\n post_data = request.get_json()\n if post_data is not None:\n add_project(post_data)\n response_object['message'] = 'Project added!'\n else:\n response_object['projects'] = get_projects()\n return jsonify(response_object)",
"def edit_project(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n # check whether the user is the one who created this project\n \n if project.user.email != request.session['email']:\n return HttpResponseRedirect('/projects/'+str(project_id))\n else:\n if request.method == \"POST\":\n temp = json.loads(request.body)\n form = ProjectForm(temp, instance=project)\n # form = ProjectForm(request.POST, instance=project)\n # check whether it's valid:\n if form.is_valid():\n #clear any previously stored tags to fix the bug \n #where we remove the tags and its not reflected\n try:\n project.tags.clear()\n project.articles.clear()\n except:\n pass\n m = form.save(commit=False)\n # m.description = bleach.clean(m.description, strip=True)\n m.save()\n tag_objects_list = _get_tags(form.cleaned_data['tags_list'])\n article_object_list = _get_articles(form.cleaned_data['articles'])\n for tag_object in tag_objects_list:\n m.tags.add(tag_object)\n for article_object in article_object_list:\n m.articles.add(article_object)\n m.save()\n # return HttpResponseRedirect('/projects/' + str(m.id))\n # return project_detail(request, m.id)\n return HttpResponse(str(m.id))\n else:\n return render(request, 'projects/edit_project.html',\n {'project': project})\n # return render(request, 'projects/error_edit.html', {'form': form})\n else:\n return render(request, 'projects/edit_project.html',\n {'project': project})\n return project_detail(request, project_id)",
"def show_project():\n\n title = request.args.get('title')\n\n title, description, grade = hackbright.get_project_by_title(title)\n\n grade_list = hackbright.get_grades_by_title(title)\n\n html = render_template(\"project.html\", title=title,\n description=description, grade=grade,\n grade_list=grade_list)\n\n return html",
"def portfolio(request):\n projects = Project.objects.all()\n categories = None\n\n if request.GET:\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n projects = projects.filter(category__name__in=categories)\n categories = ProjectCategory.objects.filter(name__in=categories)\n\n context = {\n 'projects': projects,\n 'current_categories': categories,\n }\n\n return render(request, 'portfolio/portfolio.html', context)",
"def portfolio():\n projects = get_projects()\n for project in projects:\n unicode_body = project[\"description\"].decode(\"utf-8\")\n html_body = markdown.markdown(unicode_body)\n safe_html_body = Markup(html_body)\n project[\"description\"] = safe_html_body\n context = {\n \"projects\": projects\n }\n return render_template(\"portfolio.html\", **context)",
"def create_project(self, **kwargs):\n _url = f\"{self.base_url}/projects\"\n if \"name\" not in kwargs:\n raise ValueError(\"Parameter 'name' is mandatory\")\n return self.http_call(\"post\", _url, json_data=kwargs).json()",
"def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())",
"def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)",
"def add_project(self, project):\n c = self.conn.cursor()\n cursor = c.execute(\"INSERT INTO projects VALUES (null, ?, ?, ?, ?)\", (project['owner'],\n project['title'],\n datetime.now(), datetime.now(),))\n\n self.conn.commit()\n project_id = cursor.lastrowid\n\n self.conn.cursor().execute(\"INSERT INTO users_projects VALUES (?,?)\", (project['owner'], project_id),)\n self.conn.commit()\n return self.get_project(project_id)",
"def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)",
"def add_portfolio(self, portfolio):\n self.portfolios.append(portfolio)",
"def add(self, name, project):\n self.projects[name] = project",
"def create_project(projectname):\n auth_id = request.get_json().get(\"auth_id\")\n storage_accesses = request.get_json().get(\"storage_accesses\", [])\n response = jsonify(\n admin.create_project(\n current_app.scoped_session(), projectname, auth_id, storage_accesses\n )\n )\n return response",
"def create_project():\n client = RequestManager()\n project_name = \"\".join(choices(string.ascii_letters + string.digits, k=10))\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects\")\n body = {\"name\": project_name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n STORED_ID['project_id'] = response.json()['id']",
"def create_project(self, **kwargs):\n save = kwargs.get('save', True) \n if kwargs.has_key('save'):\n del(kwargs['save'])\n\n index = self.object_index()\n defaults = dict(slug = \"test-project-%s\" % index,\n basecamp_url = \"https://foo.basecamphq.com/projects/%s/log\" % index)\n defaults.update(kwargs)\n p = Project(**defaults)\n\n if save:\n p.save()\n self.assert_(p.id)\n return p",
"def project_clone(request, proj_id=None):\n\n if not proj_id or not request.user.is_authenticated():\n raise Http404\n\n project = get_object_or_404(Project, id=proj_id)\n\n if project.user != request.user and project.is_private:\n raise Http404\n\n project.pk = None\n project.user = request.user\n project.save()\n\n for scenario in Scenario.objects \\\n .filter(project_id=proj_id) \\\n .order_by('created_at'):\n scenario.pk = None\n scenario.project = project\n scenario.save()\n\n return redirect('/project/{0}'.format(project.id))",
"def create_project_form(request):\n \n # First we check to see the site has been set up, otherwise we throw the user to the config screen\n if not bool(os.path.isdir(Project.project_options.repository_directory)):\n request.user.message_set.create(message=\"The site has not been set up yet. Log in as your admin user and create your settings!\")\n return HttpResponseRedirect(reverse('site-config'))\n \n if request.is_ajax():\n template ='project/project_create_ajax.html'\n else:\n template = 'project/project_create.html'\n \n # Lets check if this form is being shown or processed\n if request.method == \"POST\":\n # We're processing the form, so lets create the instance\n form = NewProjectForm(request.POST, auto_id=False)\n # The form is correct, lets proceeed.\n if form.is_valid():\n # Lets check the user has conformed to a sites T&C's\n if form.cleaned_data['t_and_c'] == True:\n # Create the project instance\n project = Project(\n project_id = string.lower(form.cleaned_data['project_id']),\n project_name = form.cleaned_data['project_name'],\n short_description = form.cleaned_data['short_description'],\n full_description = form.cleaned_data['full_description'],\n project_manager = request.user,\n hgweb_style = form.cleaned_data.get('hgweb_style', ''),\n project_icon = form.cleaned_data['project_icon'],\n )\n # Ok, we're all good, so lets save.\n project.save()\n # We'll tell the user that there site has been saved\n request.user.message_set.create(message=_(\"The project \" + form.cleaned_data['project_name'] + \" has been created\"))\n if request.is_ajax():\n return HttpResponse(\n \"{'success': 'true', 'url': '\" + reverse('project-detail', kwargs={'slug':form.cleaned_data['project_id']}) + \"', 'project': \" + json_encode(project) + \"}\"\n , mimetype=\"application/json\")\n else:\n return HttpResponseRedirect(reverse('project-detail', kwargs={'slug': form.cleaned_data['project_id']}))\n else:\n return render_to_response(template,\n {\n 'form':form.as_table(),\n }, context_instance=RequestContext(request)\n )\n #return HttpResponseRedirect(reverse('project-detail', kwargs={'slug':form.cleaned_data['name_short']}))\n else:\n form = NewProjectForm()\n is_auth = request.user.is_authenticated()\n \n return render_to_response(template,\n {\n 'form':form.as_table(),\n 'is_auth': is_auth\n }, context_instance=RequestContext(request)\n )",
"def edit_project_view(request, project_id):\n\n # Use to tell to the template that the user want to edit a project\n is_new = False\n\n # Retrieve the project to be edited or raise an error if this project does not exist\n project = get_object_or_404(Projet, id=project_id)\n\n # Check if the logged in user is allowed to edit this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n\n # Check if the view receive data from the form\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Manually update the field using the data from form\n project.name = form.cleaned_data[\"name\"]\n project.members.set(form.cleaned_data[\"members\"])\n # Save the project. Does not creat a new project as long as the project's id is not modified\n project.save()\n return redirect(\"projects\")\n else:\n form = ProjectForm(user=request.user, instance=project)\n return render(request, 'newProject.html', locals())\n else:\n return redirect(\"projects\")\n return redirect(\"projects\")"
] | [
"0.7893704",
"0.7302018",
"0.7279046",
"0.7119255",
"0.70725876",
"0.7025085",
"0.6988927",
"0.6972085",
"0.6608903",
"0.65972",
"0.659065",
"0.6497519",
"0.64028376",
"0.6389602",
"0.6371995",
"0.63446647",
"0.6344435",
"0.6330495",
"0.63034815",
"0.6294346",
"0.6290368",
"0.6288127",
"0.6288127",
"0.6283533",
"0.62677646",
"0.6265085",
"0.6257913",
"0.62073946",
"0.6194536",
"0.6187606"
] | 0.7959094 | 0 |
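Note: the add_project and edit_project views in the surrounding rows assume a ModelForm named ProjectForm. A minimal sketch of such a form is shown below; the Project model import path and its field names are illustrative assumptions, not taken from these rows.

# Hypothetical ModelForm assumed by the add/edit views; the model import
# and the field list are assumptions for illustration only.
from django import forms

from .models import Project  # assumed app-local model


class ProjectForm(forms.ModelForm):
    class Meta:
        model = Project
        fields = ('name', 'description', 'image', 'category')  # assumed fields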
A view to edit a portfolio project | def edit_project(request, project_id):
    if not request.user.is_superuser:
        messages.error(request, 'Sorry, only store owners can do that.')
        return redirect(reverse('home'))

    project = get_object_or_404(Project, pk=project_id)
    if request.method == 'POST':
        form = ProjectForm(request.POST, request.FILES, instance=project)
        if form.is_valid():
            form.save()
            messages.success(request, 'Successfully updated project')
            return redirect(reverse('portfolio'))
        else:
            messages.error(request, 'Failed to update project. Please ensure the form is valid.')
    else:
        form = ProjectForm(instance=project)
        messages.info(request, f'You are editing {project.name}')

    template = 'portfolio/edit_project.html'
    context = {
        'form': form,
        'project': project,
    }

    return render(request, template, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit_project(project_id):\n \n if 'username' in session: \n project = mongo.db.projects.find_one_or_404(\n {'_id': ObjectId(project_id)})\n form=ProjectForm()\n form.title.data = project['title']\n form.status.data = project['status']\n form.deadline.data = project['deadline'].strftime('%d/%m/%Y')\n form.brief.data = project['brief']\n form.note.data = project['note']\n return render_template('pages/editproject.html', form=form, project=project, legend='Edit your project')",
"def edit_project_view(request, project_id):\n\n # Use to tell to the template that the user want to edit a project\n is_new = False\n\n # Retrieve the project to be edited or raise an error if this project does not exist\n project = get_object_or_404(Projet, id=project_id)\n\n # Check if the logged in user is allowed to edit this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n\n # Check if the view receive data from the form\n if request.method == \"POST\":\n form = ProjectForm(request.user, request.POST)\n if form.is_valid():\n # Manually update the field using the data from form\n project.name = form.cleaned_data[\"name\"]\n project.members.set(form.cleaned_data[\"members\"])\n # Save the project. Does not creat a new project as long as the project's id is not modified\n project.save()\n return redirect(\"projects\")\n else:\n form = ProjectForm(user=request.user, instance=project)\n return render(request, 'newProject.html', locals())\n else:\n return redirect(\"projects\")\n return redirect(\"projects\")",
"def edit_project(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n # check whether the user is the one who created this project\n \n if project.user.email != request.session['email']:\n return HttpResponseRedirect('/projects/'+str(project_id))\n else:\n if request.method == \"POST\":\n temp = json.loads(request.body)\n form = ProjectForm(temp, instance=project)\n # form = ProjectForm(request.POST, instance=project)\n # check whether it's valid:\n if form.is_valid():\n #clear any previously stored tags to fix the bug \n #where we remove the tags and its not reflected\n try:\n project.tags.clear()\n project.articles.clear()\n except:\n pass\n m = form.save(commit=False)\n # m.description = bleach.clean(m.description, strip=True)\n m.save()\n tag_objects_list = _get_tags(form.cleaned_data['tags_list'])\n article_object_list = _get_articles(form.cleaned_data['articles'])\n for tag_object in tag_objects_list:\n m.tags.add(tag_object)\n for article_object in article_object_list:\n m.articles.add(article_object)\n m.save()\n # return HttpResponseRedirect('/projects/' + str(m.id))\n # return project_detail(request, m.id)\n return HttpResponse(str(m.id))\n else:\n return render(request, 'projects/edit_project.html',\n {'project': project})\n # return render(request, 'projects/error_edit.html', {'form': form})\n else:\n return render(request, 'projects/edit_project.html',\n {'project': project})\n return project_detail(request, project_id)",
"def update_project(id):\n if request.method == \"POST\":\n result = update_project_to_db(\n id,\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n project = get_project(id)\n return render_template(\"edit_project.html\", **project)",
"def edit_project(request, game_project_id):\n\n profile = get_object_or_404(Profile, user=request.user)\n game_project = get_object_or_404(GameProject, pk=game_project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if game_project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n game_project_form = ProjectForm(\n request.POST,\n request.FILES,\n instance=game_project\n )\n if game_project_form.is_valid():\n game_project_form.save(commit=False)\n game_project.owner = profile\n game_project.total_amount = 0\n for order in Order.objects.filter(\n game_project=game_project).filter(status='PA'):\n game_project.total_amount += order.donation_item.amount\n game_project.save()\n messages.success(request, 'Successfully updated project!')\n return redirect(reverse('project_detail', args=[game_project.id]))\n else:\n messages.error(\n request,\n 'Failed to update project. Please ensure the form is valid.'\n )\n else:\n game_project_form = ProjectForm(instance=game_project)\n messages.info(request, f'You are editing {game_project.title}')\n\n template = 'gameproject/edit_project.html'\n context = {\n 'game_project_form': game_project_form,\n 'game_project': game_project,\n }\n\n return render(request, template, context)",
"def portfolio_detail():\n return render_template('portfolio/portfolio.html')",
"def project_detail(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n project.description = markdown.markdown(bleach.clean(project.description, strip=True), extensions=['markdown.extensions.fenced_code'])\n p2 = Project.objects.get(pk=project_id)\n user_profile = UserProfile.objects.get(email=request.session['email'])\n submissions_list = Submission.objects.filter(project=project)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n context = {'project': project, 'submissions_list':submissions_list, 'current_user': request.session['email'], 'user_profile': user_profile}\n return render(request, 'projects/details.html', context)",
"def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)",
"def show_project():\n\n title = request.args.get('title')\n\n title, description, grade = hackbright.get_project_by_title(title)\n\n grade_list = hackbright.get_grades_by_title(title)\n\n html = render_template(\"project.html\", title=title,\n description=description, grade=grade,\n grade_list=grade_list)\n\n return html",
"def user_project_view(cls, user, project):\r\n pass",
"def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())",
"def add_project():\n if request.method == \"POST\":\n result = add_project_to_db(\n request.form[\"title\"],\n request.form[\"link\"],\n request.form[\"description\"]\n )\n flash(result)\n return redirect(url_for(\"portfolio\"))\n else:\n return render_template(\"add_project.html\")",
"def add_project(request):\n if not request.user.is_superuser:\n messages.error(request, 'Sorry, only store owners can do that.')\n return redirect(reverse('home'))\n\n if request.method == 'POST':\n form = ProjectForm(request.POST, request.FILES)\n if form.is_valid():\n project = form.save()\n messages.success(request, 'Project added successfully!')\n return redirect(reverse('portfolio'))\n else:\n messages.error(request, 'Failed to add project.\\\n # Please ensure the form is valid')\n else:\n form = ProjectForm()\n\n form = ProjectForm()\n template = 'portfolio/add_project.html'\n context = {\n 'form': form,\n }\n\n return render(request, template, context)",
"def updateProjects(request):\n\n updater = ProjectUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")",
"def portfolio():\n projects = get_projects()\n for project in projects:\n unicode_body = project[\"description\"].decode(\"utf-8\")\n html_body = markdown.markdown(unicode_body)\n safe_html_body = Markup(html_body)\n project[\"description\"] = safe_html_body\n context = {\n \"projects\": projects\n }\n return render_template(\"portfolio.html\", **context)",
"def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})",
"def edit_project_activated(self):\n if self.project:\n self.edit_project(EDIT)\n else:\n QMessageBox.warning(self, programName, \"There is no project to edit\")",
"def get_and_display_project():\n\n project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project)\n\n\n github_grade_list = hackbright.get_grades_by_title(project)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n github_grade_list=github_grade_list)",
"def update(self, request, pk=None):\n lot = Lot.objects.get(pk=request.data[\"lotId\"])\n\n project = Project.objects.get(pk=pk)\n project.name = request.data[\"name\"]\n project.estimatedCost = request.data[\"estimatedCost\"]\n project.estimatedCompletionDate = request.data[\"estimatedCompletionDate\"]\n #project.projectNote = Note.objects.get(pk=request.data['projectNote'])\n\n project.lotId = lot\n project.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)",
"def edit(request):\n if 'form.submitted' in request.params:\n # delete old post\n title = request.params['title']\n name = title_to_name(title)\n\n if not name or DBSession.query(Post).filter(Post.name==name).count():\n # this should be a popup ajaxy box\n return Response(\"Name %s is in use, choose a different title\" % name, content_type='text/plain', status_int=500)\n\n body = request.params['body']\n post = Post(title, body, name)\n DBSession.add(post)\n return HTTPFound(location = request.route_url('view_post', postname=name))\n\n save_url = request.route_url('edit_post')\n post = DBSession.query(Post).filter(Post.name==name).first()\n return environment_factory(post=post, save_url=save_url)",
"def user_project_view(cls, user, project):\n pass",
"def projectdetails(http_request, project_id=0):\n\tp = get_object_or_404(Project, pk=project_id)\n\treturn render_to_response('project_detail.html', {'project': p})",
"def portfolio(request):\n projects = Project.objects.all()\n categories = None\n\n if request.GET:\n if 'category' in request.GET:\n categories = request.GET['category'].split(',')\n projects = projects.filter(category__name__in=categories)\n categories = ProjectCategory.objects.filter(name__in=categories)\n\n context = {\n 'projects': projects,\n 'current_categories': categories,\n }\n\n return render(request, 'portfolio/portfolio.html', context)",
"def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. \"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)",
"def team_edit(team_id):\n if request.method == 'GET':\n team = Team.query.filter_by(team_id=team_id).one()\n return render_template('edit_team.html', team=team)",
"def project():\n\n ADMIN = current.session.s3.system_roles.ADMIN\n\n menu = M(c=\"project\")(\n M(\"Projects\", f=\"project\", m=\"summary\")(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Locations\", f=\"location\")(\n M(\"Map\", m=\"map\"),\n M(\"Contacts\", f=\"location_contact\"),\n ),\n M(\"Reports\", f=\"location\", m=\"report\")(\n M(\"3W\", f=\"location\", m=\"report\"),\n M(\"Beneficiaries\", f=\"beneficiary\", m=\"report\"),\n #M(\"Indicators\", f=\"indicator\", m=\"report\",\n # check=indicators,\n # ),\n #M(\"Indicators over Time\", f=\"indicator\", m=\"timeplot\",\n # check=indicators,\n # ),\n M(\"Funding\", f=\"organisation\", m=\"report\"),\n ),\n M(\"Import\", f=\"project\", m=\"import\", p=\"create\", restrict=[ADMIN])(\n M(\"Import Projects\", m=\"import\", p=\"create\"),\n M(\"Import Project Organizations\", f=\"organisation\",\n m=\"import\", p=\"create\"),\n M(\"Import Project Communities\", f=\"location\",\n m=\"import\", p=\"create\"),\n ),\n M(\"Activity Types\", f=\"activity_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Beneficiary Types\", f=\"beneficiary_type\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Sectors\", f=\"sector\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n M(\"Themes\", f=\"theme\", restrict=[ADMIN])(\n M(\"Create\", m=\"create\"),\n ),\n )\n\n return menu",
"def edit(self, **kwargs):\n ...",
"def edit_view(request, title, modelform, instance=None, **kwargs):\n instance_form = modelform(request.POST or None, instance=instance)\n if instance_form.is_valid():\n instance = instance_form.save()\n messages.success(request, _(\"%s was edited.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Edit\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )",
"def update_project(project_id):\n\n project = mongo.db.projects\n project.find_one_and_update({'_id': ObjectId(project_id) },\n {'$set':\n {'title': request.form.get('title'),\n 'status': request.form.get('status'),\n 'deadline': datetime.strptime(request.form.get('deadline'), '%d/%m/%Y'),\n 'note': request.form.get('note'),\n 'brief': request.form.get('brief')}})\n return redirect(url_for('projects'))",
"def get_add_project_form():\n\n return render_template(\"project_add.html\")"
] | [
"0.7728187",
"0.73595434",
"0.7352115",
"0.7184356",
"0.703075",
"0.67557067",
"0.6421052",
"0.6396073",
"0.6380097",
"0.6355604",
"0.6334676",
"0.63148147",
"0.62868273",
"0.6275144",
"0.6263248",
"0.62387496",
"0.61801016",
"0.6175357",
"0.61719286",
"0.61687547",
"0.6160295",
"0.6104489",
"0.60983706",
"0.6026067",
"0.60135716",
"0.6008381",
"0.60005915",
"0.5997544",
"0.59944415",
"0.5984973"
] | 0.7640755 | 1 |
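The reverse() calls in these portfolio views resolve URL names such as 'portfolio' and 'home'. A hedged sketch of urls.py wiring that would satisfy them follows; the module layout and the list-view name are assumptions ('home' would normally live in a separate app's URLconf).

# Hypothetical urls.py for the portfolio app; the path names mirror the
# reverse() targets used in the views, everything else is assumed.
from django.urls import path

from . import views

urlpatterns = [
    path('', views.portfolio, name='portfolio'),  # assumed list view
    path('add/', views.add_project, name='add_project'),
    path('edit/<int:project_id>/', views.edit_project, name='edit_project'),
    path('delete/<int:project_id>/', views.delete_project, name='delete_project'),
]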
A view to delete a project from the portfolio | def delete_project(request, project_id):
    if not request.user.is_superuser:
        messages.error(request, 'Sorry, only store owners can do that.')
        return redirect(reverse('home'))

    project = get_object_or_404(Project, pk=project_id)
    project.delete()
    messages.success(request, 'Project deleted!')
    return redirect(reverse('portfolio')) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_project(id):\n result = delete_project_to_db(id)\n flash(result)\n return redirect(url_for(\"portfolio\"))",
"def delete_project_view(request, id):\n\n # retrieve the project to be deleted through his id. Raise an error if the project does not exist\n project = get_object_or_404(Projet, id=id)\n\n # Check if the logged in user is allowed to delete this project\n if request.user.has_perm('taskmanager.{}_project_permission'.format(project.id)):\n # Eventually delete the project\n project.delete()\n\n return redirect(\"projects\")",
"def delete_project(request, project_id):\n\n profile = get_object_or_404(Profile, user=request.user)\n project = get_object_or_404(GameProject, pk=project_id)\n\n if not profile.is_creator:\n messages.error(request, 'Sorry, only creators can do that.')\n return redirect(reverse('home'))\n if project.owner != profile:\n messages.error(request, 'Sorry, only the project owner can do that.')\n return redirect(reverse('home'))\n\n project = get_object_or_404(GameProject, pk=project_id)\n project.delete()\n messages.success(request, 'Project deleted!')\n return redirect(reverse('all_projects'))",
"def delete_project(projectname):\n response = jsonify(admin.delete_project(current_app.scoped_session(), projectname))\n return response",
"def destroy(self, request, pk=None):\n try:\n project = Project.objects.get(pk=pk)\n project.delete()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n\n except Project.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n\n except Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)",
"def delete_project(project_id):\n \n project = mongo.db.projects\n project.delete_one({'_id': ObjectId(project_id)})\n flash('Your project has been deleted.', 'success')\n return redirect(url_for('projects'))",
"def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)",
"def delete_project(request, project_id):\n try:\n project = Project.objects.get(pk=project_id)\n except Project.DoesNotExist:\n raise Http404(\"Project does not exist\")\n # check whether the user is the one who created this project\n if project.user.email != request.session['email']:\n return HttpResponseRedirect('/projects/' + str(project_id))\n else:\n if request.method == \"POST\":\n if project:\n project.delete()\n return HttpResponseRedirect('/projects/')\n else:\n return render(request, 'projects/delete_project.html',\n {'project': project})\n return render(request, 'projects/delete_project.html', {'project': project})",
"def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)",
"def test_projects_delete(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def delete(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}\"\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.name = None",
"def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def delete(self, guid):\n if helpers.authorized(self.request.params['UUID'], self.request.params['ATO'], self.request.params['action']):\n # search for the Project and delete if found\n key = db.Key.from_path('Project', int(guid))\n project = db.get(key)\n if not project == None:\n project.delete()\n self.response.set_status(204, \"Deleted\")\n else:\n self.response.set_status(404, \"Not Found\")\n else:\n self.response.set_status(401, \"Not Authorized\")",
"def delete_project(\n name\n):\n\n cmd = dict()\n cmd[\"type_\"] = \"delete_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)",
"def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()",
"def delete_project(self, project_name):\n # type(project_name) == unicode\n project = self.db.get_project_by_name(project_name)\n if not project:\n print(u\"*** Error: The project '{}' was not found.\"\n \"\".format(project_name))\n return\n print('Caution! The related tracking will be deleted as well.{eol}'\n 'Do you really want to delete the project? [y/N] '\n ''.format(eol=os.linesep), end='')\n if not helpers.get_yes_no(default='n'):\n return\n self.db.delete_project_by_name(project_name)\n print(u\"The project '%s' has been deleted.\" % project_name)\n self.set_prompt()",
"def test_delete_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def delete_project(project_id):\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n\n else:\n if is_project_manager(project, g.user):\n # delete related tasks\n Task.query.filter_by(project=project).delete()\n #delete related invites\n Invitation.query.filter_by(project=project).delete()\n db_session.delete(project)\n db_session.commit()\n return {\n 'success': True,\n 'result': {},\n 'message': \"Project Deleted Successfully.\",\n }",
"def delete(conn, project):\n with conn:\n c = conn.cursor()\n c.execute(\"DELETE FROM projects WHERE project =?\", (project,))",
"def delete(\n self, url: str\n ) -> pymongo.results.DeleteResult:\n return self._mongo.delete({\n 'url': url\n },\n 'projects'\n )",
"def delete_project(arn=None):\n pass",
"def test_projects_id_delete(self):\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='DELETE')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def project_post_delete(sender, instance, **kwargs):\n instance.url.delete(False)",
"def delete(self, oid):\n path = '/projects/%s' % oid\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack project: %s' % truncate(res))\n return True",
"def delete_project(project_id):\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(project_id))\n client.execute_request()",
"def delete(self, *args, **kwargs):\n if 'user' not in kwargs or not args:\n self.raise401()\n\n user = kwargs['user']\n path = parse_path(args[0])\n project = Project.objects(name=path[0], members__in=[user])\n if not project:\n self.raise401()\n try:\n project.delete()\n self.set_status(204)\n self.finish()\n except Exception as e:\n reason = e.message\n self.raise400(reason=reason)",
"def delete_project(self, project_id):\n self._run(\n url_path=\"projects/delete\",\n id=project_id,\n )\n return True",
"def delete_project(self, project_id):\n return self._delete('/projects/{0}'.format(project_id))",
"def deleteProject(self, projectId):\n uri = \"/v1/projects/\" +str(projectId)\n response = self.client.delete(uri)\n return response"
] | [
"0.78291446",
"0.77674896",
"0.7705836",
"0.7590049",
"0.757986",
"0.7458269",
"0.7449735",
"0.7428579",
"0.7415701",
"0.725367",
"0.72272354",
"0.71767354",
"0.7172579",
"0.6984525",
"0.69331306",
"0.69194317",
"0.6911854",
"0.6911854",
"0.6897094",
"0.6887393",
"0.68611",
"0.68117636",
"0.6803068",
"0.67991966",
"0.6786162",
"0.67531735",
"0.6700644",
"0.66726637",
"0.6611343",
"0.6611032"
] | 0.8196082 | 0 |
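The delete view above removes the project on a plain GET request, so any crawler or link prefetcher that follows the delete URL would destroy data. A variant restricted to POST is sketched below; require_POST is a standard Django decorator, and the Project import path is an assumption.

# POST-only variant of the delete view; the model import is assumed.
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
from django.views.decorators.http import require_POST

from .models import Project  # assumed app-local model


@require_POST
def delete_project(request, project_id):
    if not request.user.is_superuser:
        messages.error(request, 'Sorry, only store owners can do that.')
        return redirect(reverse('home'))
    get_object_or_404(Project, pk=project_id).delete()
    messages.success(request, 'Project deleted!')
    return redirect(reverse('portfolio'))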
Find a single database | def find_database(self, name_or_id, instance, ignore_missing=True):
    instance = self._get_resource(_instance.Instance, instance)
    return self._find(
        _database.Database,
        name_or_id,
        instance_id=instance.id,
        ignore_missing=ignore_missing,
    ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_database(self, name):\n try:\n return [db for db in self.list_databases()\n if db.name == name][0]\n except IndexError:\n raise exc.NoSuchDatabase(\"No database by the name '%s' exists.\" %\n name)",
"def searchDatabase(self, name: str) -> Database:\n for db in self._typeCheckerList:\n if db.name.lower() == name.lower():\n return db\n return None",
"def get_database(self, database, instance=None):\n return self._get(_database.Database, database)",
"def get_db(db_name):\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db",
"def get_database(self, instance, name):\n return instance.get_database(name)",
"def get_db(db_name):\n client = MongoClient('localhost:27017')\n db = client[db_name]\n return db",
"def _get_database(self, options):\n database_key = options.get('database')\n if not database_key:\n if len(settings.DATABASES) >= 2:\n errmsg = \"Because this project contains more than one database, you\"\n errmsg += \" must specify the --database option.\"\n raise CommandError(errmsg)\n database_key = settings.DATABASES.keys()[0]\n return settings.DATABASES[database_key]",
"def get_database(conn, name):\n\n if conn.hasDatabase(name) is False:\n return conn.createDatabase(name)\n\n return conn[name]",
"def get_database() -> Database:\n db_config = DatabaseConfig(DB_NAME)\n return connect_to_db(db_config)",
"def find_server(message, db):\n db_list = sql.database_list()\n if db in db_list:\n server = db_list[db]\n message.reply(Strings['DATABASE_SERVER'].format(db, server))\n else:\n message.reply(Strings['DATABASE_UNKNOWN'].format(db))",
"def get_database(self, dbid: str, account: str) -> Optional[dict]:\n self._check_connection(check_db=False)\n db_ids = []\n all_dbs = []\n for this_db in self.get_databases():\n if this_db[\"system:resource_name\"][\"@value\"] == dbid:\n db_ids.append(this_db[\"@id\"])\n all_dbs.append(this_db)\n\n resources_ids = []\n for scope in self._dispatch_json(\"get\", self._api)[\"system:role\"][\n \"system:capability\"\n ][\"system:capability_scope\"]:\n if (\n scope[\"@type\"] == \"system:Organization\"\n and scope[\"system:organization_name\"][\"@value\"] == account\n ):\n if type(scope[\"system:resource_includes\"]) is list:\n for resource in scope[\"system:resource_includes\"]:\n resources_ids.append(resource[\"@id\"])\n\n target_db = None\n for target in set(db_ids).intersection(set(resources_ids)):\n target_db = target\n\n for this_db in all_dbs:\n if this_db[\"@id\"] == target_db:\n return this_db",
"def database():\n return conf().database",
"def get_db(self):\n self.logger.info('in get_db()')\n try:\n return self.client[self.db_name]\n except Exception as e:\n self.logger.error(f'Error occurred while getting client {e}')",
"def get_db(db_label):\n defaults = get_defaults()\n db_name = defaults[db_label]\n m = re.match('(\\w+)://.*?/([\\w.]+)', db_name)\n if m is None:\n logger.error(\"Poorly formed db name: %s\" % db_name)\n return\n sqltype = m.groups()[0]\n return DatabaseManager(db_name, sqltype=sqltype, label=db_label)",
"def get_database(self):\n return self.database",
"def get_db(self, dbname, **params):\n return Database(self._db_uri(dbname), server=self, **params)",
"def get_mongo_db(host, port, name):\n client = MongoClient(host, port)\n db = client[name]\n return db",
"def get_db():\n from pymongo import MongoClient\n client = MongoClient('localhost:27017')\n db = client.seattle\n return db",
"def get_database(self, database=None):\n\t\tdatabase = database if database !=None else self.database\n\t\t\n\t\tif self._database is None:\n\t\t\tconn = self.get_connection()\n\t\t\tdb = conn[database]\n\t\t\tself._database = db\n\t\t\n\t\treturn self._database",
"def get_database (name, parent=None):\n if \".\" in name:\n parent, name = name.split(\".\")\n\n if parent is not None:\n if not isinstance(parent, DatabaseFolder):\n parent = globals().get(parent, None)\n\n if parent is None or not isinstance(parent, DatabaseFolder):\n return None\n\n return parent.get(name, None)\n\n return globals().get(name, None)",
"def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")",
"def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")",
"def get_db(db=None):\n if db is None:\n db = ideagenstest\n return get_mongodb(db['url'],\n db['port'],\n db['dbName'],\n db['user'],\n db['pswd'])",
"def get_test_db():\n defaults = get_defaults()\n test_defaults = {k: v for k, v in defaults.items() if 'test' in k}\n key_list = list(test_defaults.keys())\n key_list.sort()\n db = None\n for k in key_list:\n test_name = test_defaults[k]\n m = re.match('(\\w+)://.*?/([\\w.]+)', test_name)\n if m is None:\n logger.warning(\"Poorly formed db name: %s\" % test_name)\n continue\n sqltype = m.groups()[0]\n try:\n db = DatabaseManager(test_name, sqltype=sqltype, label=k)\n db.grab_session()\n except Exception as e:\n logger.error(\"%s didn't work\" % test_name)\n logger.exception(e)\n continue # Clearly this test database won't work.\n logger.info(\"Using test database %s.\" % k)\n break\n if db is None:\n logger.error(\"Could not find any test database names.\")\n return db",
"def get_db():\n if ( g.get( 'db' ) is None ):\n g.db = connect_db()\n\n return g.db.connect()",
"def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict",
"def isDatabase(self, dbName):\n url = '%s/_database/%s' % (self.uri, dbName)\n data, resp = self.execute(method='GET', url=url, decode=True)\n return data",
"def get_db(self, typename):\n return self._dbs[typename]",
"def get_db(database):\n db = getattr(g, '_database', None)\n if db is None:\n intents_db = IntentsDatabaseEngine()\n expressions_db = ExpressionsDatabaseEngine()\n database_dict = {'intents': intents_db,\n 'expressions': expressions_db}\n g._database = db = database_dict\n return db[database]",
"def database_exist(database_name):\n with MongoDBConnection() as mongo:\n database_list = mongo.connection.list_database_names()\n\n exist_flag = True\n if database_name not in database_list:\n print(f'Database {database_name} not found.')\n exist_flag = False\n\n return exist_flag"
] | [
"0.7311903",
"0.6849391",
"0.6839373",
"0.6820903",
"0.6808621",
"0.67496127",
"0.67408377",
"0.6706965",
"0.6702317",
"0.6701318",
"0.66828966",
"0.6620947",
"0.65982574",
"0.6543755",
"0.6513072",
"0.6468301",
"0.64516336",
"0.6444562",
"0.640927",
"0.6397873",
"0.63197",
"0.63197",
"0.6319367",
"0.627529",
"0.62604314",
"0.6252022",
"0.6238203",
"0.62273437",
"0.6213253",
"0.6198132"
] | 0.7636754 | 0 |
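The find_database method in the last row follows the openstacksdk proxy pattern for the Database (trove) service. A usage sketch, assuming the proxy is exposed as conn.database on an openstack.connect() connection; the cloud name, database name, and instance identifier are placeholders.

# Hypothetical usage of find_database; names are placeholders.
import openstack

conn = openstack.connect(cloud='my-cloud')  # assumed clouds.yaml entry
db = conn.database.find_database('mydb', 'my-instance', ignore_missing=True)
if db is None:
    print('No database named mydb on that instance')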